Example #1

def __init__(
     self,
     resource_dir,
     publish_dir,
     publish_url,
     src_desc_url=None,
     max_files_compressed=50000,
     write_separate_manifest=True,
     move_resources=False,
 ):
     """
     Initialize a new ZipSynchronizer.
     :param resource_dir: the source directory for resources
     :param publish_dir: the directory resources should be published to
     :param publish_url: public url pointing to publish dir
     :param src_desc_url: public url pointing to resource description
     :param max_files_compressed: the maximum number of resource files that should be compressed in one zip file
     :param write_separate_manifest: whether each zip file is accompanied by a separate resourcedump manifest
     :param move_resources: whether zipped resources are moved to publish_dir (True) or simply deleted from resource_dir (False)
     """
     Synchronizer.__init__(
         self,
         resource_dir,
         publish_dir,
         publish_url,
         src_desc_url,
         max_files_compressed,
         write_separate_manifest,
         move_resources,
     )
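
A minimal usage sketch for the constructor above, assuming a ZipSynchronizer subclass with this signature; the publish() entry point is hypothetical and stands in for whatever method starts a synchronization run.

# Hypothetical usage; the paths and the publish() call are illustrative only.
sync = ZipSynchronizer(
    resource_dir="/data/resources",
    publish_dir="/var/www/resourcesync",
    publish_url="http://example.com/resourcesync/",
    max_files_compressed=10000,    # at most 10000 resource files per zip
    write_separate_manifest=True,  # one resourcedump manifest next to each zip
    move_resources=False,          # keep the zipped resources in resource_dir
)
sync.publish()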
Example #2

    def __init__(self, app):
        self.config = app.config
        sce = self.config['eth']
        if int(sce['pruning']) >= 0:
            self.db = RefcountDB(app.services.db)
            if "I am not pruning" in self.db.db:
                raise RuntimeError(
                    "The database in '{}' was initialized as non-pruning. "
                    "Can not enable pruning now.".format(
                        self.config['data_dir']))
            self.db.ttl = int(sce['pruning'])
            self.db.db.put("I am pruning", "1")
        else:
            self.db = app.services.db
            if "I am pruning" in self.db:
                raise RuntimeError(
                    "The database in '{}' was initialized as pruning. "
                    "Can not disable pruning now".format(
                        self.config['data_dir']))
            self.db.put("I am not pruning", "1")

        if 'network_id' in self.db:
            db_network_id = self.db.get('network_id')
            if db_network_id != str(sce['network_id']):
                raise RuntimeError(
                    "The database in '{}' was initialized with network id {} and can not be used "
                    "when connecting to network id {}. Please choose a different data directory."
                    .format(self.config['data_dir'], db_network_id,
                            sce['network_id']))

        else:
            self.db.put('network_id', str(sce['network_id']))
            self.db.commit()

        assert self.db is not None

        super(ChainService, self).__init__(app)
        log.info('initializing chain')
        coinbase = app.services.accounts.coinbase
        env = Env(self.db, sce['block'])
        self.chain = Chain(env,
                           new_head_cb=self._on_new_head,
                           coinbase=coinbase)

        log.info('chain at', number=self.chain.head.number)
        if 'genesis_hash' in sce:
            assert sce['genesis_hash'] == self.chain.genesis.hex_hash(), \
                "Genesis hash mismatch.\n  Expected: %s\n  Got: %s" % (
                    sce['genesis_hash'], self.chain.genesis.hex_hash())

        self.synchronizer = Synchronizer(self, force_sync=None)

        self.block_queue = Queue(maxsize=self.block_queue_size)
        self.transaction_queue = Queue(maxsize=self.transaction_queue_size)
        self.add_blocks_lock = False
        self.add_transaction_lock = gevent.lock.Semaphore()
        self.broadcast_filter = DuplicatesFilter()
        self.on_new_head_cbs = []
        self.on_new_head_candidate_cbs = []
        self.newblock_processing_times = deque(maxlen=1000)
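
The pruning guard above is a small marker-key pattern: on first open the service writes a marker recording the mode, and later opens refuse to silently flip it. A standalone sketch of the idea, using a plain dict as the store for illustration:

# Sketch of the mode-marker guard, with a dict standing in for the DB.
def open_store(store, pruning):
    marker = "I am pruning" if pruning else "I am not pruning"
    other = "I am not pruning" if pruning else "I am pruning"
    if other in store:
        raise RuntimeError("store was initialized in the opposite mode")
    store[marker] = "1"  # record the mode on first open
    return store

store = open_store({}, pruning=True)
open_store(store, pruning=False)  # raises RuntimeError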
Example #3
 def __init__(self):
     super(ChainManager, self).__init__()
     # initialized after configure
     self.miner = None
     self.blockchain = None
     self.synchronizer = Synchronizer(self)
     self.genesis = blocks.CachedBlock.create_cached(blocks.genesis())
Example #4
class DandelionApp:
    def __init__(self, config_file=None):
        self._config_manager = ConfigurationManager(config_file)

    def start_server(self):
        self._server = Server(
            self._config_manager.local_address,
            self._config_manager.local_port,
            self._config_manager.server,  # info_dict
            self._config_manager.content_db,
        )
        self._server.start()

    def start_content_synchronizer(self):
        self._synchronizer = Synchronizer(
            self._config_manager.local_address,
            self._config_manager.local_port,
            self._config_manager.type,
            self._config_manager.content_db,
        )
        self._synchronizer.start()

    def run_ui(self):
        self._ui = UI(
            self._config_manager.ui, self._config_manager.content_db, self._server, self._synchronizer  # dict
        )
        self._ui.run()

    def exit(self):
        self._synchronizer.stop()
        self._server.stop()
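
The lifecycle implied by DandelionApp is: start the server, start the synchronizer, run the (presumably blocking) UI, then tear both down. A sketch, assuming run_ui() blocks until the user quits and that a config file path is accepted; the filename is illustrative.

app = DandelionApp("dandelion.conf")
app.start_server()
app.start_content_synchronizer()
try:
    app.run_ui()  # assumed to block until the UI exits
finally:
    app.exit()    # stop the synchronizer and the server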
Example #5

 def check(self, fixture):
     synchronizer = self.getSynchronizer()
     self.assertTrue(synchronizer.isSynced())
     self.assertTrue(
         Synchronizer(fixture + '/expected/', self.getRoot1()).isSynced())
     self.assertTrue(
         Synchronizer(fixture + '/expected/', self.getRoot2()).isSynced())
Example #7
def select_request(syncr: Synchronizer,
                   all_requests: List[UpdateRequest]) -> None:
    """
    The subroutine to select an update request
    :param syncr: The synchronizer object.
    :param all_requests: List of all update requests.
    """
    def content_editor(content: str) -> str:
        tf_name = ""
        with tempfile.NamedTemporaryFile(suffix=".xml", delete=False) as tf:
            tf_name = tf.name
            tf.write(content.encode('utf-8'))

        if Cli.confirm("Edit the file before submission? "
                       "This will launch the default editor '%s'" % EDITOR):
            while True:
                if os.name == 'posix':
                    call([EDITOR, tf_name])
                else:
                    call(['cmd.exe', '/c', tf_name])

                if Cli.confirm("Finished editing and submit?"):
                    break

        with open(tf_name, 'r', encoding='utf-8') as f:
            new_content = f.read()
            return new_content

    def edit_request(r: UpdateRequest):
        r.title = Cli.input_str("Title", r.title)
        r.message = Cli.input_str("Message", r.message)
        r.reference = Cli.input_str("Reference", r.reference)

    while True:
        req_index = Cli.input_int("Select an update request", 0,
                                  len(all_requests) - 1)
        req = all_requests[req_index]

        # preview the changes in the request
        StyledPrint.update_request(req, req_index)

        options = [
            ("Edit & submit", lambda:
             (edit_request(req), syncr.submit(req, editor=content_editor),
              print("Created Github Pull Request #%d (%s)" %
                    (req.pullreq_num, req.pullreq_url)),
              all_requests.pop(req_index)), CliAction.back),
            ("Reject", lambda:
             (syncr.reject(req),
              print("Created and closed Github Pull Request #%d "
                    "(%s)" % (req.pullreq_num, req.pullreq_url)),
              all_requests.pop(req_index)), CliAction.back),
            ("Re-select", None, CliAction.back), ("Back", None, CliAction.exit)
        ]

        if not Cli.menu("", options):
            return
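
The content_editor helper above is a reusable pattern on its own: dump text to a temp file, open it in an editor, read the result back. A standalone, standard-library-only sketch of that round trip (the posix/cmd.exe branch mirrors the code above; falling back to vi for EDITOR is an assumption):

import os
import tempfile
from subprocess import call

def edit_in_editor(content: str, suffix: str = ".xml") -> str:
    # Write content to a temp file, let the user edit it, read it back.
    editor = os.environ.get("EDITOR", "vi")  # the fallback is an assumption
    with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as tf:
        tf.write(content.encode("utf-8"))
        tf_name = tf.name
    if os.name == "posix":
        call([editor, tf_name])
    else:
        call(["cmd.exe", "/c", tf_name])  # opens the file's associated app
    with open(tf_name, "r", encoding="utf-8") as f:
        return f.read()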
Example #8
 def configure(self, config, genesis=None):
     self.config = config
     logger.info('Opening chain @ %s', utils.get_db_path())
     db = self.blockchain = DB(utils.get_db_path())
     self.index = Index(db)
     if genesis:
         self._initialize_blockchain(genesis)
     logger.debug('Chain @ #%d %s', self.head.number, self.head.hex_hash())
     self.genesis = blocks.CachedBlock.create_cached(blocks.genesis())
     self.new_miner()
     self.synchronizer = Synchronizer(self)
Example #9

    def __init__(self, app):
        self.config = app.config
        sce = self.config['eth']
        if int(sce['pruning']) >= 0:
            self.db = RefcountDB(app.services.db)
            if "I am not pruning" in self.db.db:
                raise Exception("This database was initialized as non-pruning."
                                " Kinda hard to start pruning now.")
            self.db.ttl = int(sce['pruning'])
            self.db.db.put("I am pruning", "1")
        else:
            self.db = app.services.db
            if "I am pruning" in self.db:
                raise Exception("This database was initialized as pruning."
                                " Kinda hard to stop pruning now.")
            self.db.put("I am not pruning", "1")

        if 'network_id' in self.db:
            db_network_id = self.db.get('network_id')
            if db_network_id != str(sce['network_id']):
                raise Exception(
                    "This database was initialized with network_id {} "
                    "and can not be used when connecting to network_id {}".
                    format(db_network_id, sce['network_id']))

        else:
            self.db.put('network_id', str(sce['network_id']))
            self.db.commit()

        assert self.db is not None

        super(ChainService, self).__init__(app)
        log.info('initializing chain')
        coinbase = app.services.accounts.coinbase
        env = Env(self.db, sce['block'])
        self.chain = Chain(env,
                           new_head_cb=self._on_new_head,
                           coinbase=coinbase)

        log.info('chain at', number=self.chain.head.number)
        if 'genesis_hash' in sce:
            assert sce['genesis_hash'] == self.chain.genesis.hex_hash()

        self.synchronizer = Synchronizer(self, force_sync=None)

        self.block_queue = Queue(maxsize=self.block_queue_size)
        self.transaction_queue = Queue(maxsize=self.transaction_queue_size)
        self.add_blocks_lock = False
        self.add_transaction_lock = gevent.lock.Semaphore()
        self.broadcast_filter = DuplicatesFilter()
        self.on_new_head_cbs = []
        self.on_new_head_candidate_cbs = []
        self.newblock_processing_times = deque(maxlen=1000)
Example #10
 def configure(self, config, genesis=None, db=None):
     self.config = config
     if not db:
         db_path = utils.db_path(config.get('misc', 'data_dir'))
         log.info('opening chain', db_path=db_path)
         db = self.blockchain = DB(db_path)
     self.blockchain = db
     self.index = Index(db)
     if genesis:
         self._initialize_blockchain(genesis)
     log.debug('chain @', head_hash=self.head)
     self.genesis = blocks.genesis(db=db)
     log.debug('got genesis', genesis_hash=self.genesis)
     self.new_miner()
     self.synchronizer = Synchronizer(self)
Example #11
    def __init__(self, app):
        self.config = app.config
        self.db = app.services.db
        assert self.db is not None
        super(ChainService, self).__init__(app)
        log.info('initializing chain')
        self.chain = Chain(self.db, new_head_cb=self._on_new_head)
        self.synchronizer = Synchronizer(self, force_sync=None)
        self.chain.coinbase = privtoaddr(
            self.config['eth']['privkey_hex'].decode('hex'))

        self.block_queue = Queue(maxsize=self.block_queue_size)
        self.transaction_queue = Queue(maxsize=self.transaction_queue_size)
        self.add_blocks_lock = False
        self.broadcast_filter = DuplicatesFilter()
Example #12

def main():
    data = sys.argv[1]
    output_file = sys.argv[2]
    setup_logging("log_config.json")
    logging.info("Initializing")
    encoder = Encoder(synchronizer=Synchronizer())
    encoder.encode(data, output_file)
Example #14
    def __init__(self, app):
        self.config = app.config
        self.db = app.services.db
        assert self.db is not None
        super(ChainService, self).__init__(app)
        log.info('initializing chain')
        coinbase = app.services.accounts.coinbase
        self.chain = Chain(self.db, new_head_cb=self._on_new_head, coinbase=coinbase)
        log.info('chain at', number=self.chain.head.number)
        self.synchronizer = Synchronizer(self, force_sync=None)

        self.block_queue = Queue(maxsize=self.block_queue_size)
        self.transaction_queue = Queue(maxsize=self.transaction_queue_size)
        self.add_blocks_lock = False
        self.add_transaction_lock = gevent.lock.Semaphore()
        self.broadcast_filter = DuplicatesFilter()
Example #15
class TestClient(object):
    def queue_callback_function(self, output):
        for entity in output:
            self.queue.put(entity)

    def print_queue(self):
        while True:
            entity = self.queue.get()
            print(entity)

    def __init__(self):
        self.queue = multiprocessing.Queue()
        self.synchronizer = Synchronizer(self.queue)
        self.worker = multiprocessing.Process(target=self.print_queue)

    def get_all(self):
        self.synchronizer.get_all()
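
Note that __init__ above only creates the worker Process; nothing in this snippet starts it. A usage sketch under that assumption:

client = TestClient()
client.worker.start()  # the constructor does not start the consumer process
client.get_all()       # the Synchronizer is expected to feed entities into the queue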
Example #16
 def start_content_synchronizer(self):
     self._synchronizer = Synchronizer(
         self._config_manager.local_address,
         self._config_manager.local_port,
         self._config_manager.type,
         self._config_manager.content_db,
     )
     self._synchronizer.start()
Example #17
    def __init__(self, app):
        self.config = app.config
        sce = self.config['eth']
        if int(sce['pruning']) >= 0:
            self.db = RefcountDB(app.services.db)
            if "I am not pruning" in self.db.db:
                raise Exception("This database was initialized as non-pruning."
                                " Kinda hard to start pruning now.")
            self.db.ttl = int(sce['pruning'])
            self.db.db.put("I am pruning", "1")
        else:
            self.db = app.services.db
            if "I am pruning" in self.db:
                raise Exception("This database was initialized as pruning."
                                " Kinda hard to stop pruning now.")
            self.db.put("I am not pruning", "1")
        assert self.db is not None
        super(ChainService, self).__init__(app)
        log.info('initializing chain')
        coinbase = app.services.accounts.coinbase
        if sce['genesis']:
            log.info('loading genesis', path=sce['genesis'])
            _json = json.load(open(sce['genesis']))
        else:
            log.info('loaded default genesis alloc')
            _json = None
        _genesis = genesis(self.db, json=_json)
        log.info('created genesis block', hash=encode_hex(_genesis.hash))
        self.chain = Chain(self.db,
                           genesis=_genesis,
                           new_head_cb=self._on_new_head,
                           coinbase=coinbase)
        log.info('chain at', number=self.chain.head.number)
        self.synchronizer = Synchronizer(self, force_sync=None)

        self.block_queue = Queue(maxsize=self.block_queue_size)
        self.transaction_queue = Queue(maxsize=self.transaction_queue_size)
        self.add_blocks_lock = False
        self.add_transaction_lock = gevent.lock.Semaphore()
        self.broadcast_filter = DuplicatesFilter()
        self.on_new_head_cbs = []
        self.on_new_head_candidate_cbs = []
        self.newblock_processing_times = deque(maxlen=1000)
Example #18
    def DoRemoveProxy(self):
        errors = []
        try:
            logging.info('Removing bash...')
            backend.remove_bash()
        except Exception as e:
            errors.append(e)
        try:
            logging.info('Removing environment...')
            backend.remove_environment()
        except Exception as e:
            errors.append(e)
        try:
            logging.info('Removing apt...')
            backend.remove_apt()
        except Exception as e:
            errors.append(e)
        try:
            logging.info('Removing gsettings...')
            backend.remove_gsettings()
        except Exception as e:
            errors.append(e)
        try:
            logging.info('Removing sudoers...')
            backend.remove_sudoers()
        except Exception as e:
            errors.append(e)

        # Finalize
        if errors:
            logging.error('The following errors occurred while removing proxy '
                          'settings:\n{}'.format('\n'.join(str(e) for e in errors)))
        else:
            logging.info('Proxy settings were successfully removed.')
            okbox = Synchronizer(wx.MessageBox,
                                 args=('Proxy settings were successfully '
                                       'removed. You might have to restart '
                                       'your browser or any other '
                                       'applications for changes to take '
                                       'effect.', 'Settings Removed'),
                                 kwargs={'style': wx.OK})
            okbox.run()
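
The five near-identical try/except blocks above can be collapsed into a small helper; a refactor sketch, reusing the backend functions named in the code:

def run_steps(steps, errors):
    # Run each (label, fn) pair, logging progress and collecting failures.
    for label, fn in steps:
        try:
            logging.info('Removing %s...', label)
            fn()
        except Exception as e:
            errors.append(e)

errors = []
run_steps([('bash', backend.remove_bash),
           ('environment', backend.remove_environment),
           ('apt', backend.remove_apt),
           ('gsettings', backend.remove_gsettings),
           ('sudoers', backend.remove_sudoers)], errors)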
Example #22
def main(argv):
    mode, serialPort, command, duration, outputFile = parseArguments(argv)
    
    synch = Synchronizer(serialPort, outputFile)
    
    if mode == RUN_MODE:
        if duration is None:
            synch.doRun(command)
        else:
            synch.doThreadedRun(command, duration)
    else:
        synch.doIdle(duration)
Example #24
def main():
    record_time = int(sys.argv[1])
    output_file = sys.argv[2]
    setup_logging("log_config.json")
    logging.info("Initializing")
    synchronizer = Synchronizer()
    wav_recorder = Recorder(record_format=pyaudio.paInt16,
                            channels=1,
                            sample_rate=synchronizer.sample_rate,
                            chunk_size=1024)
    data_decoder = Decoder(synchronizer=synchronizer)
    wav_recorder.record_to_wav(output_file, record_time)
    data = data_decoder.decode(output_file)
    logging.info("Received data:\n{0}".format(data))
Example #26
    def __init__(self, app):
        self.config = app.config
        sce = self.config['eth']
        if int(sce['pruning']) >= 0:
            self.db = RefcountDB(app.services.db)
            if "I am not pruning" in self.db.db:
                raise Exception("This database was initialized as non-pruning."
                                " Kinda hard to start pruning now.")
            self.db.ttl = int(sce['pruning'])
            self.db.db.put("I am pruning", "1")
        else:
            self.db = app.services.db
            if "I am pruning" in self.db:
                raise Exception("This database was initialized as pruning."
                                " Kinda hard to stop pruning now.")
            self.db.put("I am not pruning", "1")
        assert self.db is not None
        super(ChainService, self).__init__(app)
        log.info('initializing chain')
        coinbase = app.services.accounts.coinbase
        try:
            _json = json.load(open(sce['genesis']))
            log.info('loading genesis', filename=sce['genesis'])
        except Exception as e:
            log.warn(str(e))
            _json = GENESIS_JSON
            log.info('loaded default genesis alloc')
        _genesis = genesis(self.db, json=_json)
        log.info('created genesis block', hash=encode_hex(_genesis.hash))
        self.chain = Chain(self.db, genesis=_genesis, new_head_cb=self._on_new_head,
                           coinbase=coinbase)
        log.info('chain at', number=self.chain.head.number)
        self.synchronizer = Synchronizer(self, force_sync=None)

        self.block_queue = Queue(maxsize=self.block_queue_size)
        self.transaction_queue = Queue(maxsize=self.transaction_queue_size)
        self.add_blocks_lock = False
        self.add_transaction_lock = gevent.lock.Semaphore()
        self.broadcast_filter = DuplicatesFilter()
        self.on_new_head_cbs = []
        self.on_new_head_candidate_cbs = []
        self.newblock_processing_times = deque(maxlen=1000)
Example #27
    def __init__(self, app):
        self.config = app.config
        self.db = app.services.db
        assert self.db is not None
        super(ChainService, self).__init__(app)
        log.info('initializing chain')
        coinbase = app.services.accounts.coinbase
        _genesis = genesis(self.db, nonce=self.config['eth']['genesis_nonce'].decode('hex'))
        self.chain = Chain(self.db, genesis=_genesis, new_head_cb=self._on_new_head,
                           coinbase=coinbase)
        log.info('chain at', number=self.chain.head.number)
        self.synchronizer = Synchronizer(self, force_sync=None)

        self.block_queue = Queue(maxsize=self.block_queue_size)
        self.transaction_queue = Queue(maxsize=self.transaction_queue_size)
        self.add_blocks_lock = False
        self.add_transaction_lock = gevent.lock.Semaphore()
        self.broadcast_filter = DuplicatesFilter()
        self.on_new_head_cbs = []
        self.on_new_head_candidate_cbs = []
        self.newblock_processing_times = deque(maxlen=1000)
Example #28

class ChainService(WiredService):
    """
    Manages the chain and requests to it.
    """
    # required by BaseService
    name = 'chain'
    default_config = dict(eth=dict(network_id=0, genesis='', pruning=-1),
                          block=ethereum_config.default_config)

    # required by WiredService
    wire_protocol = eth_protocol.ETHProtocol  # create for each peer

    # initialized after configure:
    chain = None
    genesis = None
    synchronizer = None
    config = None
    block_queue_size = 1024
    transaction_queue_size = 1024
    processed_gas = 0
    processed_elapsed = 0

    def __init__(self, app):
        self.config = app.config
        sce = self.config['eth']
        if int(sce['pruning']) >= 0:
            self.db = RefcountDB(app.services.db)
            if "I am not pruning" in self.db.db:
                raise Exception("This database was initialized as non-pruning."
                                " Kinda hard to start pruning now.")
            self.db.ttl = int(sce['pruning'])
            self.db.db.put("I am pruning", "1")
        else:
            self.db = app.services.db
            if "I am pruning" in self.db:
                raise Exception("This database was initialized as pruning."
                                " Kinda hard to stop pruning now.")
            self.db.put("I am not pruning", "1")

        if 'network_id' in self.db:
            db_network_id = self.db.get('network_id')
            if db_network_id != str(sce['network_id']):
                raise Exception(
                    "This database was initialized with network_id {} "
                    "and can not be used when connecting to network_id {}".
                    format(db_network_id, sce['network_id']))

        else:
            self.db.put('network_id', str(sce['network_id']))
            self.db.commit()

        assert self.db is not None

        super(ChainService, self).__init__(app)
        log.info('initializing chain')
        coinbase = app.services.accounts.coinbase
        env = Env(self.db, sce['block'])
        self.chain = Chain(env,
                           new_head_cb=self._on_new_head,
                           coinbase=coinbase)

        log.info('chain at', number=self.chain.head.number)
        if 'genesis_hash' in sce:
            assert sce['genesis_hash'] == self.chain.genesis.hex_hash()

        self.synchronizer = Synchronizer(self, force_sync=None)

        self.block_queue = Queue(maxsize=self.block_queue_size)
        self.transaction_queue = Queue(maxsize=self.transaction_queue_size)
        self.add_blocks_lock = False
        self.add_transaction_lock = gevent.lock.Semaphore()
        self.broadcast_filter = DuplicatesFilter()
        self.on_new_head_cbs = []
        self.on_new_head_candidate_cbs = []
        self.newblock_processing_times = deque(maxlen=1000)

    @property
    def is_syncing(self):
        return self.synchronizer.synctask is not None

    @property
    def is_mining(self):
        if 'pow' in self.app.services:
            return self.app.services.pow.active
        return False

    def _on_new_head(self, block):
        # DEBUG('new head cbs', len(self.on_new_head_cbs))
        for cb in self.on_new_head_cbs:
            cb(block)
        self._on_new_head_candidate()  # we implicitly have a new head_candidate

    def _on_new_head_candidate(self):
        # DEBUG('new head candidate cbs', len(self.on_new_head_candidate_cbs))
        for cb in self.on_new_head_candidate_cbs:
            cb(self.chain.head_candidate)

    def add_transaction(self, tx, origin=None):
        if self.is_syncing:
            return  # we can not evaluate the tx based on outdated state
        log.debug('add_transaction',
                  locked=self.add_transaction_lock.locked(),
                  tx=tx)
        assert isinstance(tx, Transaction)
        assert origin is None or isinstance(origin, BaseProtocol)

        if tx.hash in self.broadcast_filter:
            log.debug('discarding known tx')  # discard early
            return

        # validate transaction
        try:
            validate_transaction(self.chain.head_candidate, tx)
            log.debug('valid tx, broadcasting')
            self.broadcast_transaction(tx, origin=origin)  # asap
        except InvalidTransaction as e:
            log.debug('invalid tx', error=e)
            return

        if origin is not None:  # not locally added via jsonrpc
            if not self.is_mining or self.is_syncing:
                log.debug('discarding tx',
                          syncing=self.is_syncing,
                          mining=self.is_mining)
                return

        self.add_transaction_lock.acquire()
        success = self.chain.add_transaction(tx)
        self.add_transaction_lock.release()
        if success:
            self._on_new_head_candidate()

    def add_block(self, t_block, proto):
        "adds a block to the block_queue and spawns _add_block if not running"
        self.block_queue.put((t_block, proto))  # blocks if full
        if not self.add_blocks_lock:
            self.add_blocks_lock = True  # need to lock here (ctx switch is later)
            gevent.spawn(self._add_blocks)

    def add_mined_block(self, block):
        log.debug('adding mined block', block=block)
        assert isinstance(block, Block)
        assert block.header.check_pow()
        if self.chain.add_block(block):
            log.debug('added', block=block, ts=time.time())
            assert block == self.chain.head
            self.broadcast_newblock(block,
                                    chain_difficulty=block.chain_difficulty())

    def knows_block(self, block_hash):
        "if block is in chain or in queue"
        if block_hash in self.chain:
            return True
        # check if queued or processed
        for i in range(len(self.block_queue.queue)):
            if block_hash == self.block_queue.queue[i][0].header.hash:
                return True
        return False

    def _add_blocks(self):
        log.debug('add_blocks',
                  qsize=self.block_queue.qsize(),
                  add_tx_lock=self.add_transaction_lock.locked())
        assert self.add_blocks_lock is True
        self.add_transaction_lock.acquire()
        try:
            while not self.block_queue.empty():
                t_block, proto = self.block_queue.peek()  # peek: knows_block while processing
                if t_block.header.hash in self.chain:
                    log.warn('known block', block=t_block)
                    self.block_queue.get()
                    continue
                if t_block.header.prevhash not in self.chain:
                    log.warn('missing parent',
                             block=t_block,
                             head=self.chain.head)
                    self.block_queue.get()
                    continue
                # FIXME, this is also done in validation and in synchronizer for new_blocks
                if not t_block.header.check_pow():
                    log.warn('invalid pow', block=t_block, FIXME='ban node')
                    sentry.warn_invalid(t_block, 'InvalidBlockNonce')
                    self.block_queue.get()
                    continue
                try:  # deserialize
                    st = time.time()
                    block = t_block.to_block(env=self.chain.env)
                    elapsed = time.time() - st
                    log.debug('deserialized',
                              elapsed='%.4fs' % elapsed,
                              ts=time.time(),
                              gas_used=block.gas_used,
                              gpsec=self.gpsec(block.gas_used, elapsed))
                except processblock.InvalidTransaction as e:
                    log.warn('invalid transaction',
                             block=t_block,
                             error=e,
                             FIXME='ban node')
                    errtype = \
                        'InvalidNonce' if isinstance(e, InvalidNonce) else \
                        'NotEnoughCash' if isinstance(e, InsufficientBalance) else \
                        'OutOfGasBase' if isinstance(e, InsufficientStartGas) else \
                        'other_transaction_error'
                    sentry.warn_invalid(t_block, errtype)
                    self.block_queue.get()
                    continue
                except VerificationFailed as e:
                    log.warn('verification failed', error=e, FIXME='ban node')
                    sentry.warn_invalid(t_block, 'other_block_error')
                    self.block_queue.get()
                    continue
                # Check canary
                score = 0
                for address in canary_addresses:
                    if block.get_storage_data(address, 1) > 0:
                        score += 1
                if score >= 2:
                    log.warn('canary triggered')
                    continue
                # All checks passed
                log.debug('adding', block=block, ts=time.time())
                if self.chain.add_block(
                        block, forward_pending_transactions=self.is_mining):
                    now = time.time()
                    log.info('added',
                             block=block,
                             txs=block.transaction_count,
                             gas_used=block.gas_used)
                    if t_block.newblock_timestamp:
                        total = now - t_block.newblock_timestamp
                        self.newblock_processing_times.append(total)
                        avg = statistics.mean(self.newblock_processing_times)
                        med = statistics.median(self.newblock_processing_times)
                        max_ = max(self.newblock_processing_times)
                        min_ = min(self.newblock_processing_times)
                        log.info('processing time',
                                 last=total,
                                 avg=avg,
                                 max=max_,
                                 min=min_,
                                 median=med)
                else:
                    log.warn('could not add', block=block)

                self.block_queue.get()  # remove block from queue (we peeked only)
                gevent.sleep(0.001)
        finally:
            self.add_blocks_lock = False
            self.add_transaction_lock.release()

    def gpsec(self, gas_spent=0, elapsed=0):
        if gas_spent:
            self.processed_gas += gas_spent
            self.processed_elapsed += elapsed
        return int(self.processed_gas / (0.001 + self.processed_elapsed))

    def broadcast_newblock(self, block, chain_difficulty=None, origin=None):
        if not chain_difficulty:
            assert block.hash in self.chain
            chain_difficulty = block.chain_difficulty()
        assert isinstance(block, (eth_protocol.TransientBlock, Block))
        if self.broadcast_filter.update(block.header.hash):
            log.debug('broadcasting newblock', origin=origin)
            bcast = self.app.services.peermanager.broadcast
            bcast(eth_protocol.ETHProtocol,
                  'newblock',
                  args=(block, chain_difficulty),
                  exclude_peers=[origin.peer] if origin else [])
        else:
            log.debug('already broadcasted block')

    def broadcast_transaction(self, tx, origin=None):
        assert isinstance(tx, Transaction)
        if self.broadcast_filter.update(tx.hash):
            log.debug('broadcasting tx', origin=origin)
            bcast = self.app.services.peermanager.broadcast
            bcast(eth_protocol.ETHProtocol,
                  'transactions',
                  args=(tx, ),
                  exclude_peers=[origin.peer] if origin else [])
        else:
            log.debug('already broadcasted tx')

    # wire protocol receivers ###########

    def on_wire_protocol_start(self, proto):
        log.debug('----------------------------------')
        log.debug('on_wire_protocol_start', proto=proto)
        assert isinstance(proto, self.wire_protocol)
        # register callbacks
        proto.receive_status_callbacks.append(self.on_receive_status)
        proto.receive_transactions_callbacks.append(
            self.on_receive_transactions)
        proto.receive_getblockhashes_callbacks.append(
            self.on_receive_getblockhashes)
        proto.receive_blockhashes_callbacks.append(self.on_receive_blockhashes)
        proto.receive_getblocks_callbacks.append(self.on_receive_getblocks)
        proto.receive_blocks_callbacks.append(self.on_receive_blocks)
        proto.receive_newblock_callbacks.append(self.on_receive_newblock)
        proto.receive_newblockhashes_callbacks.append(self.on_newblockhashes)

        # send status
        head = self.chain.head
        proto.send_status(chain_difficulty=head.chain_difficulty(),
                          chain_head_hash=head.hash,
                          genesis_hash=self.chain.genesis.hash)

    def on_wire_protocol_stop(self, proto):
        assert isinstance(proto, self.wire_protocol)
        log.debug('----------------------------------')
        log.debug('on_wire_protocol_stop', proto=proto)

    def on_receive_status(self, proto, eth_version, network_id,
                          chain_difficulty, chain_head_hash, genesis_hash):
        log.debug('----------------------------------')
        log.debug('status received', proto=proto, eth_version=eth_version)
        assert eth_version == proto.version, (eth_version, proto.version)
        if network_id != self.config['eth'].get('network_id',
                                                proto.network_id):
            log.warn("invalid network id",
                     remote_network_id=network_id,
                     expected_network_id=self.config['eth'].get(
                         'network_id', proto.network_id))
            raise eth_protocol.ETHProtocolError('wrong network_id')

        # check genesis
        if genesis_hash != self.chain.genesis.hash:
            log.warn("invalid genesis hash",
                     remote_id=proto,
                     genesis=genesis_hash.encode('hex'))
            raise eth_protocol.ETHProtocolError('wrong genesis block')

        # request chain
        self.synchronizer.receive_status(proto, chain_head_hash,
                                         chain_difficulty)

        # send transactions
        transactions = self.chain.get_transactions()
        if transactions:
            log.debug("sending transactions", remote_id=proto)
            proto.send_transactions(*transactions)

    # transactions

    def on_receive_transactions(self, proto, transactions):
        "receives rlp.decoded serialized"
        log.debug('----------------------------------')
        log.debug('remote_transactions_received',
                  count=len(transactions),
                  remote_id=proto)
        for tx in transactions:
            self.add_transaction(tx, origin=proto)

    # blockhashes ###########

    def on_newblockhashes(self, proto, newblockhashes):
        """
        msg sent out if the full block is not propagated;
        chances are high that we get the newblock, though.
        """
        log.debug('----------------------------------')
        log.debug("recv newnewblockhashes",
                  num=len(newblockhashes),
                  remote_id=proto)
        assert len(newblockhashes) <= 32
        self.synchronizer.receive_newblockhashes(proto, newblockhashes)

    def on_receive_getblockhashes(self, proto, child_block_hash, count):
        log.debug('----------------------------------')
        log.debug("handle_get_blockhashes",
                  count=count,
                  block_hash=encode_hex(child_block_hash))
        max_hashes = min(count, self.wire_protocol.max_getblockhashes_count)
        found = []
        if child_block_hash not in self.chain:
            log.debug("unknown block")
            proto.send_blockhashes(*[])
            return

        last = child_block_hash
        while len(found) < max_hashes:
            try:
                last = rlp.decode_lazy(
                    self.chain.db.get(last))[0][0]  # [head][prevhash]
            except KeyError:
                # this can happen if we started a chain download, which did not complete
                # should not happen if the hash is part of the canonical chain
                log.warn('KeyError in getblockhashes', hash=last)
                break
            if last:
                found.append(last)
            else:
                break

        log.debug("sending: found block_hashes", count=len(found))
        proto.send_blockhashes(*found)

    def on_receive_blockhashes(self, proto, blockhashes):
        log.debug('----------------------------------')
        if blockhashes:
            log.debug("on_receive_blockhashes",
                      count=len(blockhashes),
                      remote_id=proto,
                      first=encode_hex(blockhashes[0]),
                      last=encode_hex(blockhashes[-1]))
        else:
            log.debug("recv 0 remote block hashes, signifying genesis block")
        self.synchronizer.receive_blockhashes(proto, blockhashes)

    # blocks ################

    def on_receive_getblocks(self, proto, blockhashes):
        log.debug('----------------------------------')
        log.debug("on_receive_getblocks", count=len(blockhashes))
        found = []
        for bh in blockhashes[:self.wire_protocol.max_getblocks_count]:
            try:
                found.append(self.chain.db.get(bh))
            except KeyError:
                log.debug("unknown block requested", block_hash=encode_hex(bh))
        if found:
            log.debug("found", count=len(found))
            proto.send_blocks(*found)

    def on_receive_blocks(self, proto, transient_blocks):
        log.debug('----------------------------------')
        blk_number = max(x.header.number
                         for x in transient_blocks) if transient_blocks else 0
        log.debug("recv blocks",
                  count=len(transient_blocks),
                  remote_id=proto,
                  highest_number=blk_number)
        if transient_blocks:
            self.synchronizer.receive_blocks(proto, transient_blocks)

    def on_receive_newblock(self, proto, block, chain_difficulty):
        log.debug('----------------------------------')
        log.debug("recv newblock", block=block, remote_id=proto)
        self.synchronizer.receive_newblock(proto, block, chain_difficulty)

    def on_receive_getblockheaders(self, proto, blockhashes):
        log.debug('----------------------------------')
        log.debug("on_receive_getblockheaders", count=len(blockhashes))
        found = []
        for bh in blockhashes[:self.wire_protocol.max_getblocks_count]:
            try:
                found.append(rlp.encode(rlp.decode(self.chain.db.get(bh))[0]))
            except KeyError:
                log.debug("unknown block requested", block_hash=encode_hex(bh))
        if found:
            log.debug("found", count=len(found))
            proto.send_blockheaders(*found)

    def on_receive_blockheaders(self, proto, transient_blocks):
        log.debug('----------------------------------')
        pass
        # TODO: implement headers first syncing

    def on_receive_hashlookup(self, proto, hashes):
        found = []
        for h in hashes:
            try:
                found.append(
                    utils.encode_hex(
                        self.chain.db.get('node:' + utils.decode_hex(h))))
            except KeyError:
                found.append('')
        proto.send_hashlookupresponse(found)  # send the collected responses, not the loop variable

    def on_receive_hashlookupresponse(self, proto, hashresponses):
        pass
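
The add_block/_add_blocks pair above is a single-consumer queue pattern: any greenlet may enqueue, a boolean flag ensures at most one drainer runs, and the drainer peeks so that knows_block still sees the block while it is being processed. A minimal standalone sketch of that pattern (handle() is a hypothetical stand-in for the real processing):

import gevent
from gevent.queue import Queue

queue = Queue(maxsize=1024)
draining = False

def handle(item):
    print("processing", item)  # hypothetical work

def enqueue(item):
    global draining
    queue.put(item)      # blocks if the queue is full
    if not draining:
        draining = True  # set the flag before any context switch
        gevent.spawn(_drain)

def _drain():
    global draining
    try:
        while not queue.empty():
            item = queue.peek()  # keep the item visible while processing
            handle(item)
            queue.get()          # remove it only after processing
            gevent.sleep(0)      # yield to other greenlets
    finally:
        draining = False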
Example #29
File: main.py Project: jbruns/smartchime
                state_tracker.controlsLockCycles = state_tracker.controlsLockCycles - 1
                state_tracker.must_refresh = True

        # physical controls take precedence over the "normal" widget display, so don't take away the display lock if the controls haven't released it.
        if state_tracker.oled_enabled and state_tracker.controlsLockCycles == 0:
            # Advance the scrolling widgets.
            for scroller in scrollers:
                vars()[scroller].tick()

            if state_tracker.must_refresh:
                state_tracker.must_refresh = False
                # clear the active scrolling widgets and reset the synchronizer.
                for scroller in scrollers:
                    del vars()[scroller]
                scrollers = []
                synchronizer = Synchronizer()

                # Arrange widgets on the ImageComposition, per the config. This is done one row at a time as follows:
                #   - set the y-coordinate for the row.
                #   - for each configured widget, call the WidgetFactory to create the widget content for eventual placement on the ImageComposition.
                #   - place each widget according to column identifier (1-4):
                #       - 1: left justified.
                #       - 4: right justified.
                #       - columns 2 and 3 make use of a little extra logic: if both exist, split them evenly across the center of the display. if only one is present, center it.
                #   - position the widgets (icon + text) according to the x/y coordinates that have been determined.
                #   - refresh the ImageComposition to make the new positions take effect.
                #   - enable scrolling for widgets that declare as such in their config.
                r = 0
                for row in oled_config['arrangement'][0]:
                    r += 1
                    row_columns = len(vars()[row][0]['columns'][0])
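
The placement rules described in the comments above (column 1 left justified, column 4 right justified, columns 2 and 3 split across the center when both exist, centered alone otherwise) reduce to a small function; a sketch with a hypothetical column_x helper, taking widths in pixels:

def column_x(column, widget_width, display_width, both_center_present=False):
    # Map a column identifier (1-4) to an x coordinate per the rules above.
    if column == 1:
        return 0                              # left justified
    if column == 4:
        return display_width - widget_width   # right justified
    if both_center_present:
        half = display_width // 2             # split columns 2 and 3 evenly
        offset = (half - widget_width) // 2
        return offset if column == 2 else half + offset
    return (display_width - widget_width) // 2  # lone center widget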
Example #30
class ChainService(WiredService):
    """
    Manages the chain and requests to it.
    """
    # required by BaseService
    name = 'chain'
    default_config = dict(eth=dict(privkey_hex=''))

    # required by WiredService
    wire_protocol = eth_protocol.ETHProtocol  # create for each peer

    # initialized after configure:
    chain = None
    genesis = None
    synchronizer = None
    config = None
    block_queue_size = 1024
    transaction_queue_size = 1024

    def __init__(self, app):
        self.config = app.config
        self.db = app.services.db
        assert self.db is not None
        super(ChainService, self).__init__(app)
        log.info('initializing chain')
        self.chain = Chain(self.db, new_head_cb=self._on_new_head)
        self.synchronizer = Synchronizer(self, force_sync=None)
        self.chain.coinbase = privtoaddr(
            self.config['eth']['privkey_hex'].decode('hex'))

        self.block_queue = Queue(maxsize=self.block_queue_size)
        self.transaction_queue = Queue(maxsize=self.transaction_queue_size)
        self.add_blocks_lock = False
        self.broadcast_filter = DuplicatesFilter()

    def _on_new_head(self, block):
        pass

    def add_block(self, t_block, proto):
        "adds a block to the block_queue and spawns _add_block if not running"
        self.block_queue.put((t_block, proto))  # blocks if full
        if not self.add_blocks_lock:
            self.add_blocks_lock = True
            gevent.spawn(self._add_blocks)

    def _add_blocks(self):
        log.debug('add_blocks', qsize=self.block_queue.qsize())
        try:
            while not self.block_queue.empty():
                t_block, proto = self.block_queue.get()
                if t_block.header.hash in self.chain:
                    log.warn('known block', block=t_block)
                    continue
                if t_block.header.prevhash not in self.chain:
                    log.warn('missing parent', block=t_block)
                    continue
                if not t_block.header.check_pow():
                    log.warn('invalid pow', block=t_block)
                    # FIXME ban node
                    continue
                try:  # deserialize
                    st = time.time()
                    block = t_block.to_block(db=self.chain.db)
                    elapsed = time.time() - st
                    log.debug('deserialized',
                              elapsed='%.2fs' % elapsed,
                              gas_used=block.gas_used,
                              gpsec=int(block.gas_used / elapsed))
                except processblock.InvalidTransaction as e:
                    log.warn('invalid transaction', block=t_block, error=e)
                    # FIXME ban node
                    continue

                if self.chain.add_block(block):
                    log.debug('added', block=block)
                gevent.sleep(0.001)
        finally:
            self.add_blocks_lock = False

    def broadcast_newblock(self, block, chain_difficulty, origin=None):
        assert isinstance(block, eth_protocol.TransientBlock)
        if self.broadcast_filter.known(block.header.hash):
            log.debug('already broadcasted block')
        else:
            log.debug('broadcasting newblock', origin=origin)
            bcast = self.app.services.peermanager.broadcast
            bcast(eth_protocol.ETHProtocol,
                  'newblock',
                  args=(block, chain_difficulty),
                  num_peers=None,
                  exclude_protos=[origin])

    # wire protocol receivers ###########

    def on_wire_protocol_start(self, proto):
        log.debug('on_wire_protocol_start', proto=proto)
        assert isinstance(proto, self.wire_protocol)
        # register callbacks
        proto.receive_status_callbacks.append(self.on_receive_status)
        proto.receive_transactions_callbacks.append(
            self.on_receive_transactions)
        proto.receive_getblockhashes_callbacks.append(
            self.on_receive_getblockhashes)
        proto.receive_blockhashes_callbacks.append(self.on_receive_blockhashes)
        proto.receive_getblocks_callbacks.append(self.on_receive_getblocks)
        proto.receive_blocks_callbacks.append(self.on_receive_blocks)
        proto.receive_newblock_callbacks.append(self.on_receive_newblock)

        # send status
        head = self.chain.head
        proto.send_status(chain_difficulty=head.chain_difficulty(),
                          chain_head_hash=head.hash,
                          genesis_hash=self.chain.genesis.hash)

    def on_wire_protocol_stop(self, proto):
        assert isinstance(proto, self.wire_protocol)
        log.debug('on_wire_protocol_stop', proto=proto)

    def on_receive_status(self, proto, eth_version, network_id,
                          chain_difficulty, chain_head_hash, genesis_hash):

        log.debug('status received', proto=proto, eth_version=eth_version)
        assert eth_version == proto.version, (eth_version, proto.version)
        if network_id != proto.network_id:
            log.warn("invalid network id",
                     remote_id=proto.network_id,
                     network_id=network_id)
            raise eth_protocol.ETHProtocolError('wrong network_id')

        # check genesis
        if genesis_hash != self.chain.genesis.hash:
            log.warn("invalid genesis hash",
                     remote_id=proto,
                     genesis=genesis_hash.encode('hex'))
            raise eth_protocol.ETHProtocolError('wrong genesis block')

        # request chain
        self.synchronizer.receive_status(proto, chain_head_hash,
                                         chain_difficulty)

        # send transactions
        transactions = self.chain.get_transactions()
        if transactions:
            log.debug("sending transactions", remote_id=proto)
            proto.send_transactions(*transactions)

    # transactions

    def on_receive_transactions(self, proto, transactions):
        "receives rlp.decoded serialized"
        log.debug('remote_transactions_received',
                  count=len(transactions),
                  remote_id=proto)
        log.debug('skipping, FIXME')
        return  # transaction handling disabled; the loop below is intentionally unreachable
        for tx in transactions:
            # fixme bloomfilter
            self.chain.add_transaction(tx)

    # blockhashes ###########

    def on_receive_getblockhashes(self, proto, child_block_hash, count):
        log.debug("handle_get_blockhashes",
                  count=count,
                  block_hash=encode_hex(child_block_hash))
        max_hashes = min(count, self.wire_protocol.max_getblockhashes_count)
        found = []
        if child_block_hash not in self.chain:
            log.debug("unknown block")
            proto.send_blockhashes(*[])
            return

        last = child_block_hash
        while len(found) < max_hashes:
            last = rlp.decode_lazy(self.chain.db.get(last))[0][0]
            if last:
                found.append(last)
            else:
                break

        log.debug("sending: found block_hashes", count=len(found))
        proto.send_blockhashes(*found)

    def on_receive_blockhashes(self, proto, blockhashes):
        if blockhashes:
            log.debug("on_receive_blockhashes",
                      count=len(blockhashes),
                      remote_id=proto,
                      first=encode_hex(blockhashes[0]),
                      last=encode_hex(blockhashes[-1]))
        else:
            log.debug("recv 0 remote block hashes, signifying genesis block")
        self.synchronizer.receive_blockhashes(proto, blockhashes)

    # blocks ################

    def on_receive_getblocks(self, proto, blockhashes):
        log.debug("on_receive_getblocks", count=len(blockhashes))
        found = []
        for bh in blockhashes[:self.wire_protocol.max_getblocks_count]:
            try:
                found.append(self.chain.db.get(bh))
            except KeyError:
                log.debug("unknown block requested", block_hash=encode_hex(bh))
        if found:
            log.debug("found", count=len(found))
            proto.send_blocks(*found)

    def on_receive_blocks(self, proto, transient_blocks):
        blk_number = max(x.header.number
                         for x in transient_blocks) if transient_blocks else 0
        log.debug("recv blocks",
                  count=len(transient_blocks),
                  remote_id=proto,
                  highest_number=blk_number)
        if transient_blocks:
            self.synchronizer.receive_blocks(proto, transient_blocks)

    def on_receive_newblock(self, proto, block, chain_difficulty):
        log.debug("recv newblock", block=block, remote_id=proto)
        self.synchronizer.receive_newblock(proto, block, chain_difficulty)
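The registration above appends bound methods to per-message callback lists on the protocol object; the wire protocol presumably iterates the matching list whenever a message of that type is decoded. A minimal sketch of that dispatch pattern, with a hypothetical ProtoStub standing in for eth_protocol.ETHProtocol:

class ProtoStub(object):
    def __init__(self):
        self.receive_status_callbacks = []

    def _receive_status(self, **decoded):
        # invoked by the networking layer once a status message is decoded
        for cb in self.receive_status_callbacks:
            cb(self, **decoded)

proto = ProtoStub()
proto.receive_status_callbacks.append(
    lambda proto, **kw: print('status received:', kw))
proto._receive_status(eth_version=61, network_id=0)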
Example #31
class ChainService(WiredService):

    """
    Manages the chain and requests to it.
    """
    # required by BaseService
    name = 'chain'
    default_config = dict(eth=dict(privkey_hex=''))

    # required by WiredService
    wire_protocol = eth_protocol.ETHProtocol  # create for each peer

    # initialized after configure:
    chain = None
    genesis = None
    synchronizer = None
    config = None
    block_queue_size = 1024
    transaction_queue_size = 1024

    def __init__(self, app):
        self.config = app.config
        self.db = app.services.db
        assert self.db is not None
        super(ChainService, self).__init__(app)
        log.info('initializing chain')
        self.chain = Chain(self.db, new_head_cb=self._on_new_head)
        self.synchronizer = Synchronizer(self, force_sync=None)
        self.chain.coinbase = privtoaddr(self.config['eth']['privkey_hex'].decode('hex'))

        self.block_queue = Queue(maxsize=self.block_queue_size)
        self.transaction_queue = Queue(maxsize=self.transaction_queue_size)
        self.add_blocks_lock = False
        self.broadcast_filter = DuplicatesFilter()

    def _on_new_head(self, block):
        pass

    def add_block(self, t_block, proto):
        "adds a block to the block_queue and spawns _add_block if not running"
        self.block_queue.put((t_block, proto))  # blocks if full
        if not self.add_blocks_lock:
            self.add_blocks_lock = True
            gevent.spawn(self._add_blocks)

    def _add_blocks(self):
        log.debug('add_blocks', qsize=self.block_queue.qsize())
        try:
            while not self.block_queue.empty():
                t_block, proto = self.block_queue.get()
                if t_block.header.hash in self.chain:
                    log.warn('known block', block=t_block)
                    continue
                if t_block.header.prevhash not in self.chain:
                    log.warn('missing parent', block=t_block)
                    continue
                if not t_block.header.check_pow():
                    log.warn('invalid pow', block=t_block)
                    # FIXME ban node
                    continue
                try:  # deserialize
                    st = time.time()
                    block = t_block.to_block(db=self.chain.db)
                    elapsed = time.time() - st
                    log.debug('deserialized', elapsed='%.2fs' % elapsed,
                              gas_used=block.gas_used, gpsec=int(block.gas_used / elapsed))
                except processblock.InvalidTransaction as e:
                    log.warn('invalid transaction', block=t_block, error=e)
                    # FIXME ban node
                    continue

                if self.chain.add_block(block):
                    log.debug('added', block=block)
                gevent.sleep(0.001)
        finally:
            self.add_blocks_lock = False

    def broadcast_newblock(self, block, chain_difficulty, origin=None):
        assert isinstance(block, eth_protocol.TransientBlock)
        if self.broadcast_filter.known(block.header.hash):
            log.debug('already broadcasted block')
        else:
            log.debug('broadcasting newblock', origin=origin)
            bcast = self.app.services.peermanager.broadcast
            bcast(eth_protocol.ETHProtocol, 'newblock', args=(block, chain_difficulty),
                  num_peers=None, exclude_protos=[origin])

    # wire protocol receivers ###########

    def on_wire_protocol_start(self, proto):
        log.debug('on_wire_protocol_start', proto=proto)
        assert isinstance(proto, self.wire_protocol)
        # register callbacks
        proto.receive_status_callbacks.append(self.on_receive_status)
        proto.receive_transactions_callbacks.append(self.on_receive_transactions)
        proto.receive_getblockhashes_callbacks.append(self.on_receive_getblockhashes)
        proto.receive_blockhashes_callbacks.append(self.on_receive_blockhashes)
        proto.receive_getblocks_callbacks.append(self.on_receive_getblocks)
        proto.receive_blocks_callbacks.append(self.on_receive_blocks)
        proto.receive_newblock_callbacks.append(self.on_receive_newblock)

        # send status
        head = self.chain.head
        proto.send_status(chain_difficulty=head.chain_difficulty(), chain_head_hash=head.hash,
                          genesis_hash=self.chain.genesis.hash)

    def on_wire_protocol_stop(self, proto):
        assert isinstance(proto, self.wire_protocol)
        log.debug('on_wire_protocol_stop', proto=proto)

    def on_receive_status(self, proto, eth_version, network_id, chain_difficulty, chain_head_hash,
                          genesis_hash):

        log.debug('status received', proto=proto, eth_version=eth_version)
        assert eth_version == proto.version, (eth_version, proto.version)
        if network_id != proto.network_id:
            log.warn("invalid network id", remote_id=proto.network_id, network_id=network_id)
            raise eth_protocol.ETHProtocolError('wrong network_id')

        # check genesis
        if genesis_hash != self.chain.genesis.hash:
            log.warn("invalid genesis hash", remote_id=proto, genesis=genesis_hash.encode('hex'))
            raise eth_protocol.ETHProtocolError('wrong genesis block')

        # request chain
        self.synchronizer.receive_status(proto, chain_head_hash, chain_difficulty)

        # send transactions
        transactions = self.chain.get_transactions()
        if transactions:
            log.debug("sending transactions", remote_id=proto)
            proto.send_transactions(*transactions)

    # transactions

    def on_receive_transactions(self, proto, transactions):
        "receives rlp.decoded serialized"
        log.debug('remote_transactions_received', count=len(transactions), remote_id=proto)
        log.debug('skipping, FIXME')
        return  # processing disabled; the loop below is intentionally unreachable
        for tx in transactions:
            # fixme bloomfilter
            self.chain.add_transaction(tx)

    # blockhashes ###########

    def on_receive_getblockhashes(self, proto, child_block_hash, count):
        log.debug("handle_get_blockhashes", count=count, block_hash=encode_hex(child_block_hash))
        max_hashes = min(count, self.wire_protocol.max_getblockhashes_count)
        found = []
        if child_block_hash not in self.chain:
            log.debug("unknown block")
            proto.send_blockhashes(*[])
            return

        last = child_block_hash
        while len(found) < max_hashes:
            last = rlp.decode_lazy(self.chain.db.get(last))[0][0]
            if last:
                found.append(last)
            else:
                break

        log.debug("sending: found block_hashes", count=len(found))
        proto.send_blockhashes(*found)

    def on_receive_blockhashes(self, proto, blockhashes):
        if blockhashes:
            log.debug("on_receive_blockhashes", count=len(blockhashes), remote_id=proto,
                      first=encode_hex(blockhashes[0]), last=encode_hex(blockhashes[-1]))
        else:
            log.debug("recv 0 remote block hashes, signifying genesis block")
        self.synchronizer.receive_blockhashes(proto, blockhashes)

    # blocks ################

    def on_receive_getblocks(self, proto, blockhashes):
        log.debug("on_receive_getblocks", count=len(blockhashes))
        found = []
        for bh in blockhashes[:self.wire_protocol.max_getblocks_count]:
            try:
                found.append(self.chain.db.get(bh))
            except KeyError:
                log.debug("unknown block requested", block_hash=encode_hex(bh))
        if found:
            log.debug("found", count=len(found))
            proto.send_blocks(*found)

    def on_receive_blocks(self, proto, transient_blocks):
        blk_number = max(x.header.number for x in transient_blocks) if transient_blocks else 0
        log.debug("recv blocks", count=len(transient_blocks), remote_id=proto,
                  highest_number=blk_number)
        if transient_blocks:
            self.synchronizer.receive_blocks(proto, transient_blocks)

    def on_receive_newblock(self, proto, block, chain_difficulty):
        log.debug("recv newblock", block=block, remote_id=proto)
        self.synchronizer.receive_newblock(proto, block, chain_difficulty)
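add_block above couples a plain boolean flag with gevent.spawn so that at most one consumer greenlet drains the queue at a time; the flag is flipped before spawning because the context switch happens later. A standalone sketch of the same idiom (illustrative names, not the source's):

import gevent
from gevent.queue import Queue

work_queue = Queue(maxsize=1024)
consumer_running = False  # plays the role of add_blocks_lock

def submit(item):
    global consumer_running
    work_queue.put(item)         # blocks if the queue is full
    if not consumer_running:
        consumer_running = True  # flip before the greenlet context switch
        gevent.spawn(_drain)

def _drain():
    global consumer_running
    try:
        while not work_queue.empty():
            print('processing', work_queue.get())
            gevent.sleep(0.001)  # yield so other greenlets can run
    finally:
        consumer_running = False  # a later submit() may spawn a new consumer

submit('block-1')
submit('block-2')
gevent.sleep(0.01)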
Example #32
 def __init__(self):
     self.queue = multiprocessing.Queue()
     self.synchronizer = Synchronizer(self.queue)
     self.worker = multiprocessing.Process(target=self.print_queue)
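The snippet omits print_queue and the Synchronizer internals; a plausible completion of the worker side, assuming it simply drains and prints queued items until a sentinel arrives (hypothetical, not the project's code):

 def print_queue(self):
     while True:
         item = self.queue.get()  # blocks until an item is available
         if item is None:         # assumed sentinel for shutting down
             break
         print(item)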
Example #33
class ChainService(WiredService):
    """
    Manages the chain and requests to it.
    """
    # required by BaseService
    name = 'chain'
    default_config = dict(eth=dict(network_id=0))

    # required by WiredService
    wire_protocol = eth_protocol.ETHProtocol  # create for each peer

    # initialized after configure:
    chain = None
    genesis = None
    synchronizer = None
    config = None
    block_queue_size = 1024
    transaction_queue_size = 1024
    processed_gas = 0
    processed_elapsed = 0

    def __init__(self, app):
        self.config = app.config
        self.db = app.services.db
        assert self.db is not None
        super(ChainService, self).__init__(app)
        log.info('initializing chain')
        coinbase = app.services.accounts.coinbase
        self.chain = Chain(self.db,
                           new_head_cb=self._on_new_head,
                           coinbase=coinbase)
        log.info('chain at', number=self.chain.head.number)
        self.synchronizer = Synchronizer(self, force_sync=None)

        self.block_queue = Queue(maxsize=self.block_queue_size)
        self.transaction_queue = Queue(maxsize=self.transaction_queue_size)
        self.add_blocks_lock = False
        self.add_transaction_lock = gevent.lock.Semaphore()
        self.broadcast_filter = DuplicatesFilter()
        self.on_new_head_cbs = []
        self.on_new_head_candidate_cbs = []

    @property
    def is_syncing(self):
        return self.synchronizer.synctask is not None

    def _on_new_head(self, block):
        for cb in self.on_new_head_cbs:
            cb(block)
        self._on_new_head_candidate()  # we implicitly have a new head_candidate

    def _on_new_head_candidate(self):
        for cb in self.on_new_head_candidate_cbs:
            cb(self.chain.head_candidate)

    def add_transaction(self, tx, origin=None):
        assert isinstance(tx, Transaction)
        log.debug('add_transaction', locked=self.add_transaction_lock.locked())
        self.add_transaction_lock.acquire()
        success = self.chain.add_transaction(tx)
        self.add_transaction_lock.release()
        if success:
            self._on_new_head_candidate()
            self.broadcast_transaction(tx, origin=origin)  # asap

    def add_block(self, t_block, proto):
        "adds a block to the block_queue and spawns _add_block if not running"
        self.block_queue.put((t_block, proto))  # blocks if full
        if not self.add_blocks_lock:
            self.add_blocks_lock = True  # need to lock here (ctx switch is later)
            gevent.spawn(self._add_blocks)

    def add_mined_block(self, block):
        log.debug('adding mined block', block=block)
        assert block.check_pow()
        if self.chain.add_block(block):
            log.info('added', block=block, ts=time.time())
            assert block == self.chain.head
            self.broadcast_newblock(block,
                                    chain_difficulty=block.chain_difficulty())

    def knows_block(self, block_hash):
        "if block is in chain or in queue"
        if block_hash in self.chain:
            return True
        # check if queued or processed
        for i in range(len(self.block_queue.queue)):
            if block_hash == self.block_queue.queue[i][0].header.hash:
                return True
        return False

    def _add_blocks(self):
        log.debug('add_blocks',
                  qsize=self.block_queue.qsize(),
                  add_tx_lock=self.add_transaction_lock.locked())
        assert self.add_blocks_lock is True
        self.add_transaction_lock.acquire()
        try:
            while not self.block_queue.empty():
                t_block, proto = self.block_queue.peek()  # peek: knows_block while processing
                if t_block.header.hash in self.chain:
                    log.warn('known block', block=t_block)
                    self.block_queue.get()
                    continue
                if t_block.header.prevhash not in self.chain:
                    log.warn('missing parent', block=t_block)
                    self.block_queue.get()
                    continue
                # FIXME, this is also done in validation and in synchronizer for new_blocks
                if not t_block.header.check_pow():
                    log.warn('invalid pow', block=t_block, FIXME='ban node')
                    self.block_queue.get()
                    continue
                try:  # deserialize
                    st = time.time()
                    block = t_block.to_block(db=self.chain.db)
                    elapsed = time.time() - st
                    log.debug('deserialized',
                              elapsed='%.4fs' % elapsed,
                              gas_used=block.gas_used,
                              gpsec=self.gpsec(block.gas_used, elapsed))
                except processblock.InvalidTransaction as e:
                    log.warn('invalid transaction',
                             block=t_block,
                             error=e,
                             FIXME='ban node')
                    self.block_queue.get()
                    continue
                except VerificationFailed as e:
                    log.warn('verification failed', error=e, FIXME='ban node')
                    self.block_queue.get()
                    continue

                if self.chain.add_block(block):
                    log.info('added', block=block, ts=time.time())
                self.block_queue.get()  # remove block from queue (we peeked only)
                gevent.sleep(0.001)
        finally:
            self.add_blocks_lock = False
            self.add_transaction_lock.release()

    def gpsec(self, gas_spent=0, elapsed=0):
        self.processed_gas += gas_spent
        self.processed_elapsed += elapsed
        return int(self.processed_gas / (0.001 + self.processed_elapsed))

    def broadcast_newblock(self, block, chain_difficulty=None, origin=None):
        if not chain_difficulty:
            assert block.hash in self.chain
            chain_difficulty = block.chain_difficulty()
        assert isinstance(block, (eth_protocol.TransientBlock, Block))
        if self.broadcast_filter.known(block.header.hash):
            log.debug('already broadcasted block')
        else:
            log.debug('broadcasting newblock', origin=origin)
            bcast = self.app.services.peermanager.broadcast
            bcast(eth_protocol.ETHProtocol,
                  'newblock',
                  args=(block, chain_difficulty),
                  exclude_peers=[origin.peer] if origin else [])

    def broadcast_transaction(self, tx, origin=None):
        assert isinstance(tx, Transaction)
        if self.broadcast_filter.known(tx.hash):
            log.debug('already broadcasted tx')
        else:
            log.debug('broadcasting tx', origin=origin)
            bcast = self.app.services.peermanager.broadcast
            bcast(eth_protocol.ETHProtocol,
                  'transactions',
                  args=(tx, ),
                  exclude_peers=[origin.peer] if origin else [])

    # wire protocol receivers ###########

    def on_wire_protocol_start(self, proto):
        log.debug('on_wire_protocol_start', proto=proto)
        assert isinstance(proto, self.wire_protocol)
        # register callbacks
        proto.receive_status_callbacks.append(self.on_receive_status)
        proto.receive_transactions_callbacks.append(
            self.on_receive_transactions)
        proto.receive_getblockhashes_callbacks.append(
            self.on_receive_getblockhashes)
        proto.receive_blockhashes_callbacks.append(self.on_receive_blockhashes)
        proto.receive_getblocks_callbacks.append(self.on_receive_getblocks)
        proto.receive_blocks_callbacks.append(self.on_receive_blocks)
        proto.receive_newblock_callbacks.append(self.on_receive_newblock)

        # send status
        head = self.chain.head
        proto.send_status(chain_difficulty=head.chain_difficulty(),
                          chain_head_hash=head.hash,
                          genesis_hash=self.chain.genesis.hash)

    def on_wire_protocol_stop(self, proto):
        assert isinstance(proto, self.wire_protocol)
        log.debug('on_wire_protocol_stop', proto=proto)

    def on_receive_status(self, proto, eth_version, network_id,
                          chain_difficulty, chain_head_hash, genesis_hash):

        log.debug('status received', proto=proto, eth_version=eth_version)
        assert eth_version == proto.version, (eth_version, proto.version)
        if network_id != self.config['eth'].get('network_id',
                                                proto.network_id):
            log.warn("invalid network id",
                     remote_network_id=network_id,
                     expected_network_id=self.config['eth'].get(
                         'network_id', proto.network_id))
            raise eth_protocol.ETHProtocolError('wrong network_id')

        # check genesis
        if genesis_hash != self.chain.genesis.hash:
            log.warn("invalid genesis hash",
                     remote_id=proto,
                     genesis=genesis_hash.encode('hex'))
            raise eth_protocol.ETHProtocolError('wrong genesis block')

        # request chain
        self.synchronizer.receive_status(proto, chain_head_hash,
                                         chain_difficulty)

        # send transactions
        transactions = self.chain.get_transactions()
        if transactions:
            log.debug("sending transactions", remote_id=proto)
            proto.send_transactions(*transactions)

    # transactions

    def on_receive_transactions(self, proto, transactions):
        "receives rlp.decoded serialized"
        log.debug('remote_transactions_received',
                  count=len(transactions),
                  remote_id=proto)
        for tx in transactions:
            self.add_transaction(tx, origin=proto)

    # blockhashes ###########

    def on_receive_getblockhashes(self, proto, child_block_hash, count):
        log.debug("handle_get_blockhashes",
                  count=count,
                  block_hash=encode_hex(child_block_hash))
        max_hashes = min(count, self.wire_protocol.max_getblockhashes_count)
        found = []
        if child_block_hash not in self.chain:
            log.debug("unknown block")
            proto.send_blockhashes(*[])
            return

        last = child_block_hash
        while len(found) < max_hashes:
            try:
                last = rlp.decode_lazy(self.chain.db.get(last))[0][0]
            except KeyError:
                # this can happen if we started a chain download, which did not complete
                # should not happen if the hash is part of the canonical chain
                log.warn('KeyError in getblockhashes', hash=last)
                break
            if last:
                found.append(last)
            else:
                break

        log.debug("sending: found block_hashes", count=len(found))
        proto.send_blockhashes(*found)

    def on_receive_blockhashes(self, proto, blockhashes):
        if blockhashes:
            log.debug("on_receive_blockhashes",
                      count=len(blockhashes),
                      remote_id=proto,
                      first=encode_hex(blockhashes[0]),
                      last=encode_hex(blockhashes[-1]))
        else:
            log.debug("recv 0 remote block hashes, signifying genesis block")
        self.synchronizer.receive_blockhashes(proto, blockhashes)

    # blocks ################

    def on_receive_getblocks(self, proto, blockhashes):
        log.debug("on_receive_getblocks", count=len(blockhashes))
        found = []
        for bh in blockhashes[:self.wire_protocol.max_getblocks_count]:
            try:
                found.append(self.chain.db.get(bh))
            except KeyError:
                log.debug("unknown block requested", block_hash=encode_hex(bh))
        if found:
            log.debug("found", count=len(found))
            proto.send_blocks(*found)

    def on_receive_blocks(self, proto, transient_blocks):
        blk_number = max(x.header.number
                         for x in transient_blocks) if transient_blocks else 0
        log.debug("recv blocks",
                  count=len(transient_blocks),
                  remote_id=proto,
                  highest_number=blk_number)
        if transient_blocks:
            self.synchronizer.receive_blocks(proto, transient_blocks)

    def on_receive_newblock(self, proto, block, chain_difficulty):
        log.debug("recv newblock", block=block, remote_id=proto)
        self.synchronizer.receive_newblock(proto, block, chain_difficulty)
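The gpsec helper above keeps a cumulative gas-per-second average over all processed blocks, with the 0.001 term guarding against division by zero. A quick standalone check of the arithmetic:

processed_gas = 0
processed_elapsed = 0

def gpsec(gas_spent=0, elapsed=0):
    global processed_gas, processed_elapsed
    processed_gas += gas_spent
    processed_elapsed += elapsed
    return int(processed_gas / (0.001 + processed_elapsed))

print(gpsec(1000000, 0.5))  # ~1996007: 1M gas in 0.5s
print(gpsec(1000000, 1.5))  # ~999500: cumulative 2M gas in 2.0s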
Example #34
def sync(config_file: str, max_auto_requests: int = 0):
    """
    Synchronize between OEC and NASA
    """
    local_requests = []

    def sync_callback(request: UpdateRequest):
        """Collects and displays an update request"""
        local_requests.append(request)

    sync_object = Synchronizer(config_file)

    # automatic mode
    if max_auto_requests > 0:
        print("Starting auto-sync...")
        sync_object.sync(sync_callback, get_progress_callback())
        req_to_send = local_requests[:max_auto_requests]
        for req_idx, req in enumerate(req_to_send):
            try:
                print("\nSubmitting request %d...\t\t" % req_idx, end="")
                sync_object.submit(req)
                print("PR #%d (%s)" % (req.pullreq_num, req.pullreq_url))
            except Exception as ex:
                logging.exception(ex)
        return

    # interactive mode
    while True:
        # start the menu
        options = [
            ("Discard changes and exit the program", None, CliAction.exit),
            (
                "Synchronize",
                lambda:
                (local_requests.clear(),
                 sync_object.sync(sync_callback, get_progress_callback())),
                CliAction.back  # need to regenerate menu text
            ),
        ]
        if len(local_requests) > 0:
            options += [
                ("List local update requests", lambda: frozenset(
                    map(lambda tup: StyledPrint.update_request(tup[1], tup[0]),
                        enumerate(local_requests))), CliAction.stay),
                ("Select a local update request",
                 lambda: select_request(sync_object, local_requests),
                 CliAction.back),
            ]

        title = "\n" \
                "+======================================================+\n" \
                "|                OEC-SYNC - MAIN MENU                  |\n" \
                "+======================================================+\n"
        title = Style.apply(title, Style.HEADER, Style.BOLD) + \
            "OEC repository:  %s\n" \
            "Current user:    %s\n" \
            "Remote requests: %d\n" \
            "Local requests:  %d\n" \
            % (sync_object.db.repo.html_url,
                sync_object.db.user.login,
                len(sync_object.db.requests),
                len(local_requests))

        try:
            if not Cli.menu(title, options):
                break
        except EOFError:
            return
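Cli.menu and CliAction are not shown in this excerpt; a minimal sketch of a menu helper that consumes the (label, action, CliAction) tuples built above, assuming menu returns False to signal exit, as the while-loop in sync() expects:

from enum import Enum

class CliAction(Enum):
    exit = 0
    back = 1
    stay = 2

def menu(title, options):
    print(title)
    for i, (label, _action, _next) in enumerate(options):
        print('%d) %s' % (i, label))
    label, action, next_action = options[int(input('> '))]
    if action is not None:
        action()
    # False makes the caller's while-loop break, as sync() relies on
    return next_action is not CliAction.exit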
Example #35
class Channels:
    __storage = StorageController().__get_instance__
    __JWT = JWTController().__get_instance__
    __sync = Synchronizer().__get_instance__

    def __retrieve_user__(self, id):
        try:
            user = self.__storage.search(id, self.__storage.db_clients)
            return user, user['key']
        except IndexError as err:
            logging.error(err)
            return err

    def add_channel(self):
        data = request.get_json()
        payload = data['payload']
        try:
            user, key = self.__retrieve_user__(data['user'])
        except Exception as e:
            return jsonify({
                "type": "Authentication error",
                "msg": "User not found"
            }), 401

        try:
            deserialized_data = self.__JWT.verify_token(payload, key)
        except InvalidJWSSignature as signErr:
            logging.error(f"Error in the verification {signErr}")
            return jsonify({
                "type": "error",
                "msg": "Error verifying the token",
                "system_msg": signErr.args
            }), 401
        except Exception as err:
            logging.error(err)
            return jsonify({
                "type": "Unrecognized error",
                "msg": err.args
            }), 401

        data = {
            "owner": data['user'],
            "name": deserialized_data['name'],
            "creation_time": time.time(),
            "queues": []
        }

        channel = self.__storage.insert(data, self.__storage.db_channels)
        self.__sync.update()
        return jsonify({
            "type": "response",
            "payload": self.__JWT.generate_token(channel, key['k']),
            "time": time.time()
        }), 200

    def list_channels(self, user):
        try:
            user, key = self.__retrieve_user__(user)  # Encrypt response
        except Exception as e:
            return jsonify({
                "type": "Authentication error",
                "msg": "User not found"
            }), 401

        channels = self.__storage.get_all_documents(self.__storage.db_channels)
        return jsonify({
            "type": "response",
            "payload": self.__JWT.generate_token({"response": channels}, key['k']),
            "time": time.time()
        }), 200

    def remove_channel(self, channel, user):
        try:
            user, key = self.__retrieve_user__(user)
        except Exception as e:
            return jsonify({
                "type": "Authentication error",
                "msg": "User not found"
            }), 401

        try:
            deserialized_data = self.__JWT.verify_token(channel, key)
        except InvalidJWSSignature as signErr:
            logging.error(f"Error in the verification {signErr}")
            return jsonify({
                "type": "error",
                "msg": "Error verifying the token",
                "system_msg": signErr.args
            }), 401
        except Exception as err:
            logging.error(err)
            return jsonify({
                "type": "Unrecognized error",
                "msg": err.args
            }), 401

        channel = self.__storage.search(deserialized_data['id'],
                                        self.__storage.db_channels)

        if channel['owner'] != user['_id']:
            return jsonify({
                'type': "Authentication error",
                'msg': "You must be the owner of the channel to remove it"
            }), 401

        id = self.__storage.delete(deserialized_data['id'],
                                   self.__storage.db_channels)
        self.__sync.update()
        return jsonify({
            "type": "response",
            "payload": self.__JWT.generate_token({
                "time": time.time(),
                "deleted": id
            }, key['k']),
            "time": time.time()
        }), 200

    def add_queue(self):
        data = request.get_json()
        payload = data['payload']
        try:
            user, key = self.__retrieve_user__(data['user'])
        except Exception as e:
            return jsonify({
                "type": "Authentication error",
                "msg": "User not found"
            }), 401

        try:
            deserialized_data = self.__JWT.verify_token(payload, key)
        except InvalidJWSSignature as signErr:
            logging.error(f"Error in the verification {signErr}")
            return jsonify({
                "type": "error",
                "msg": "Error verifying the token",
                "system_msg": signErr.args
            }), 401
        except Exception as err:
            logging.error(err)
            return jsonify({
                "type": "Unrecognized error",
                "msg": err.args
            }), 401

        try:
            channel = self.__storage.search(deserialized_data['channel'],
                                            self.__storage.db_channels)
        except Exception as e:
            return jsonify({"type": "error", "msg": "Channel not found"}), 401

        channel['queues'].append({
            "name": deserialized_data['name'],
            "creation_time": time.time()
        })

        id = self.__storage.update(deserialized_data['channel'],
                                   {"queues": channel['queues']},
                                   self.__storage.db_channels)
        self.__sync.update()
        return jsonify({
            "type": "response",
            "payload": self.__JWT.generate_token({
                'edited': id,
                'time': time.time()
            }, key['k']),
            "time": time.time()
        }), 200

    def list_queue(self, channel, user):
        try:
            user, key = self.__retrieve_user__(user)
        except Exception as e:
            return jsonify({
                "type": "Authentication error",
                "msg": "User not found"
            }), 401

        try:
            deserialized_data = self.__JWT.verify_token(channel, key)
        except InvalidJWSSignature as signErr:
            logging.error(f"Error in the verification {signErr}")
            return jsonify({
                "type": "error",
                "msg": "Error verifying the token",
                "system_msg": signErr.args
            }), 401
        except Exception as err:
            logging.error(err)
            return jsonify({
                "type": "Unrecognized error",
                "msg": err.args
            }), 401

        channel = self.__storage.search(deserialized_data['channel'],
                                        self.__storage.db_channels)
        return jsonify({
            "type": "response",
            "payload": self.__JWT.generate_token({"response": channel['queues']},
                                                 key['k']),
            "time": time.time()
        }), 200

    def delete_queue(self, channel, user):
        try:
            user, key = self.__retrieve_user__(user)
        except Exception as e:
            return jsonify({
                "type": "Authentication error",
                "msg": "User not found"
            }), 401

        try:
            deserialized_data = self.__JWT.verify_token(channel, key)
        except InvalidJWSSignature as signErr:
            logging.error(f"Error in the verification {signErr}")
            return jsonify({
                "type": "error",
                "msg": "Error verifying the token",
                "system_msg": signErr.args
            }), 401
        except Exception as err:
            logging.error(err)
            return jsonify({
                "type": "Unrecognized error",
                "msg": err.args
            }), 401

        channel = self.__storage.search(deserialized_data['channel'],
                                        self.__storage.db_channels)

        if channel['owner'] != user['_id']:
            return jsonify({
                'type': "Authentication error",
                'msg': "You must be the owner of the channel to remove a queue"
            }), 401

        index = next((i for (i, d) in enumerate(channel['queues'])
                      if d['name'] == deserialized_data['queue']), None)
        logging.debug(index)
        if index is None:
            return jsonify({"type": "error", "msg": "Queue not found"}), 404
        channel['queues'].pop(index)
        logging.debug(channel)

        id = self.__storage.update(channel['_id'],
                                   {'queues': channel['queues']},
                                   self.__storage.db_channels)
        self.__sync.update()
        return jsonify({
            "type": "response",
            "payload": self.__JWT.generate_token({
                "deleted": id,
                "time": time.time()
            }, key['k']),
            "time": time.time()
        }), 200
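add_channel above expects a JSON body with a user id and a payload signed with that user's key. A hypothetical client-side call, assuming verify_token/generate_token wrap jwcrypto signed tokens under the user's symmetric key (only the request shape is taken from the handler above):

import requests
from jwcrypto import jwk, jwt

# in reality this must be the symmetric key the server stores for the user
user_key = jwk.JWK.generate(kty='oct', size=256)

token = jwt.JWT(header={'alg': 'HS256'}, claims={'name': 'my-channel'})
token.make_signed_token(user_key)

resp = requests.post('http://localhost:5000/channel',
                     json={'user': 'some-user-id', 'payload': token.serialize()})
print(resp.status_code, resp.json())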
Example #36
 def init(config: str):
     Interface.__config = config
     Interface.__syncr = Synchronizer(config)
Example #37
clients = Clients()
channels = Channels()

logging.basicConfig(level=logging.DEBUG)  # Configure logging

# Configure flask
flask_app = FlaskInstance().__getinstance__
app = flask_app.get_app()

# Creating symmetric key for the server
jwt = JWTController().__get_instance__
jwt.set_global_key(jwt.generate_new_symmetric_key())
logging.info(f"Use the following key to encrypt the data in order to talk with this server in external connections: {jwt.get_global_key}")

syncker = Synchronizer().__get_instance__

# Configure endpoints
# Client endpoints
flask_app.add_endpoint('/user', "new_user", clients.add_user, ['post'])
flask_app.add_endpoint('/user/<user>', 'delete_user', clients.delete_user, ['delete'])
flask_app.add_endpoint('/users', 'get_all_users', clients.get_all_users, ['get'])
flask_app.add_endpoint('/user/<user>', 'search_user', clients.get_user, ['get'])

# Channels endpoints
flask_app.add_endpoint('/channel', "new_channel", channels.add_channel, ['post'])
flask_app.add_endpoint('/channels/<user>', "list_channels", channels.list_channels, ['get'])
flask_app.add_endpoint('/channel/<channel>/<user>', "remove_channel", channels.remove_channel, ['delete'])

# Queue endpoints
flask_app.add_endpoint('/channel/add', "new_queue", channels.add_queue, ['post'])
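FlaskInstance is not shown; its add_endpoint is presumably a thin wrapper over Flask's add_url_rule. A plausible sketch, inferred only from the calls above:

from flask import Flask

class FlaskInstance(object):
    def __init__(self):
        self._app = Flask(__name__)

    def get_app(self):
        return self._app

    def add_endpoint(self, rule, name, handler, methods):
        # Flask expects upper-case HTTP method names
        self._app.add_url_rule(rule, name, handler,
                               methods=[m.upper() for m in methods])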
Example #38
    def DoApplyProxy(self, protos, hosts, ports, user, pwd, noproxy, useauth):

        # Check before applying....
        checknames = ('bash', 'environment', 'apt', 'gsettings', 'sudoers')
        checkfuncs = (backend.check_bash, backend.check_environment,
                      backend.check_apt, backend.check_gsettings,
                      backend.check_sudoers)
        remfuncs = (backend.remove_bash, backend.remove_environment,
                    backend.remove_apt, backend.remove_gsettings,
                    backend.remove_sudoers)
        found = []
        remfound = {}
        for cfunc, name, rfunc in zip(checkfuncs, checknames, remfuncs):
            logging.info('Checking {}...'.format(name))
            result = cfunc()
            if result:
                found.extend(result)
                remfound[name] = rfunc

        # If found any settings, ask for overwrite
        if found:
            message = ('Proxy settings were detected in:\n{}'
                       .format('\n'.join(found)))
            logging.warning(message)
            warnbox = Synchronizer(wx.MessageBox,
                                   args=('Some proxy settings were detected '
                                         'in your system. Do you want to '
                                         'overwrite them?', 'Confirm Overwrite'
                                         ),
                                   kwargs={'style': wx.CENTRE |
                                           wx.ICON_QUESTION | wx.YES_NO})
            overwrite = warnbox.run()
            if overwrite == wx.NO:
                logging.info('No settings were applied.')
                return
            else:
                logging.warning('Overwriting settings...')
                for name, rfunc in remfound.items():
                    logging.info('Removing {}...'.format(name))
                    rfunc()

        # Catch all the exceptions individually and report later
        errors = []
        try:
            logging.info('Setting bash...')
            backend.set_bash(protos, hosts, ports, user=user, pwd=pwd,
                             noproxy=noproxy, useauth=useauth)
        except Exception as e:
            errors.append(e)
        try:
            logging.info('Setting environment...')
            backend.set_environment(protos, hosts, ports, user=user, pwd=pwd,
                                    noproxy=noproxy, useauth=useauth)
        except Exception as e:
            errors.append(e)
        try:
            logging.info('Setting apt...')
            backend.set_apt(protos, hosts, ports, user=user, pwd=pwd,
                            useauth=useauth)
        except Exception as e:
            errors.append(e)
        try:
            logging.info('Setting gsettings...')
            backend.set_gsettings(protos, hosts, ports, user=user, pwd=pwd,
                                  noproxy=noproxy)
        except Exception as e:
            errors.append(e)
        try:
            logging.info('Setting sudoers...')
            backend.set_sudoers(protos, noproxy=noproxy)
        except Exception as e:
            errors.append(e)

        # Finalize
        if errors:
            errstring = '\n'.join((str(e) for e in errors))
            logging.error('The following errors occurred while applying proxy '
                          'settings\n{}'.format(errstring))
        else:
            logging.info('Proxy settings were successfully applied.')
            okbox = Synchronizer(wx.MessageBox,
                                 args=('Proxy settings were successfully '
                                       'applied. You might have to restart '
                                       'your browser or any other '
                                       'applications for changes to take '
                                       'effect.', 'Settings Applied'),
                                 kwargs={'style': wx.OK})
            okbox.run()
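Here Synchronizer appears to marshal a callable (wx.MessageBox) onto the wx main thread and block the worker until the result is available. A minimal sketch of that idea, assumed behavior rather than the project's actual implementation:

import threading
import wx

class Synchronizer(object):
    def __init__(self, func, args=(), kwargs=None):
        self.func, self.args, self.kwargs = func, args, kwargs or {}
        self._done = threading.Event()
        self._result = None

    def _call(self):
        self._result = self.func(*self.args, **self.kwargs)
        self._done.set()

    def run(self):
        wx.CallAfter(self._call)  # execute on the GUI thread
        self._done.wait()         # block this worker thread until done
        return self._result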
Example #39
 def getSynchronizer(self):
     return Synchronizer(self.getRoot1(), self.getRoot2(),
                         self._rcloneCommand)
Example #40
class ChainService(WiredService):

    """
    Manages the chain and requests to it.
    """
    # required by BaseService
    name = 'chain'
    default_config = dict(eth=dict(network_id=0, genesis_nonce=GENESIS_NONCE.encode('hex')))

    # required by WiredService
    wire_protocol = eth_protocol.ETHProtocol  # create for each peer

    # initialized after configure:
    chain = None
    genesis = None
    synchronizer = None
    config = None
    block_queue_size = 1024
    transaction_queue_size = 1024
    processed_gas = 0
    processed_elapsed = 0

    def __init__(self, app):
        self.config = app.config
        self.db = app.services.db
        assert self.db is not None
        super(ChainService, self).__init__(app)
        log.info('initializing chain')
        coinbase = app.services.accounts.coinbase
        _genesis = genesis(self.db, nonce=self.config['eth']['genesis_nonce'].decode('hex'))
        self.chain = Chain(self.db, genesis=_genesis, new_head_cb=self._on_new_head,
                           coinbase=coinbase)
        log.info('chain at', number=self.chain.head.number)
        self.synchronizer = Synchronizer(self, force_sync=None)

        self.block_queue = Queue(maxsize=self.block_queue_size)
        self.transaction_queue = Queue(maxsize=self.transaction_queue_size)
        self.add_blocks_lock = False
        self.add_transaction_lock = gevent.lock.Semaphore()
        self.broadcast_filter = DuplicatesFilter()
        self.on_new_head_cbs = []
        self.on_new_head_candidate_cbs = []
        self.newblock_processing_times = deque(maxlen=1000)
        # gevent.spawn(update_watcher, self)

    @property
    def is_syncing(self):
        return self.synchronizer.synctask is not None

    @property
    def is_mining(self):
        if 'pow' in self.app.services:
            return self.app.services.pow.active
        return False

    def _on_new_head(self, block):
        for cb in self.on_new_head_cbs:
            cb(block)
        self._on_new_head_candidate()  # we implicitly have a new head_candidate

    def _on_new_head_candidate(self):
        for cb in self.on_new_head_candidate_cbs:
            cb(self.chain.head_candidate)

    def add_transaction(self, tx, origin=None):
        log.debug('add_transaction', locked=self.add_transaction_lock.locked(), tx=tx)
        assert isinstance(tx, Transaction)
        assert origin is None or isinstance(origin, BaseProtocol)

        if tx.hash in self.broadcast_filter:
            log.debug('discarding known tx')  # discard early
            return

        # validate transaction
        try:
            validate_transaction(self.chain.head_candidate, tx)
            log.debug('valid tx, broadcasting')
            self.broadcast_transaction(tx, origin=origin)  # asap
        except InvalidTransaction as e:
            log.debug('invalid tx', error=e)
            return

        if origin is not None:  # not locally added via jsonrpc
            if not self.is_mining or self.is_syncing:
                log.debug('discarding tx', syncing=self.is_syncing, mining=self.is_mining)
                return

        self.add_transaction_lock.acquire()
        success = self.chain.add_transaction(tx)
        self.add_transaction_lock.release()
        if success:
            self._on_new_head_candidate()


    def add_block(self, t_block, proto):
        "adds a block to the block_queue and spawns _add_block if not running"
        self.block_queue.put((t_block, proto))  # blocks if full
        if not self.add_blocks_lock:
            self.add_blocks_lock = True  # need to lock here (ctx switch is later)
            gevent.spawn(self._add_blocks)

    def add_mined_block(self, block):
        log.debug('adding mined block', block=block)
        assert isinstance(block, Block)
        assert block.header.check_pow()
        if self.chain.add_block(block):
            log.debug('added', block=block, ts=time.time())
            assert block == self.chain.head
            self.broadcast_newblock(block, chain_difficulty=block.chain_difficulty())

    def knows_block(self, block_hash):
        "if block is in chain or in queue"
        if block_hash in self.chain:
            return True
        # check if queued or processed
        for i in range(len(self.block_queue.queue)):
            if block_hash == self.block_queue.queue[i][0].header.hash:
                return True
        return False

    def _add_blocks(self):
        log.debug('add_blocks', qsize=self.block_queue.qsize(),
                  add_tx_lock=self.add_transaction_lock.locked())
        assert self.add_blocks_lock is True
        self.add_transaction_lock.acquire()
        try:
            while not self.block_queue.empty():
                t_block, proto = self.block_queue.peek()  # peek: knows_block while processing
                if t_block.header.hash in self.chain:
                    log.warn('known block', block=t_block)
                    self.block_queue.get()
                    continue
                if t_block.header.prevhash not in self.chain:
                    log.warn('missing parent', block=t_block)
                    self.block_queue.get()
                    continue
                # FIXME, this is also done in validation and in synchronizer for new_blocks
                if not t_block.header.check_pow():
                    log.warn('invalid pow', block=t_block, FIXME='ban node')
                    self.block_queue.get()
                    continue
                try:  # deserialize
                    st = time.time()
                    block = t_block.to_block(db=self.chain.db)
                    elapsed = time.time() - st
                    log.debug('deserialized', elapsed='%.4fs' % elapsed, ts=time.time(),
                              gas_used=block.gas_used, gpsec=self.gpsec(block.gas_used, elapsed))
                except processblock.InvalidTransaction as e:
                    log.warn('invalid transaction', block=t_block, error=e, FIXME='ban node')
                    self.block_queue.get()
                    continue
                except VerificationFailed as e:
                    log.warn('verification failed', error=e, FIXME='ban node')
                    self.block_queue.get()
                    continue
                log.debug('adding', block=block, ts=time.time())
                if self.chain.add_block(block, forward_pending_transactions=self.is_mining):
                    now = time.time()
                    log.debug('added', block=block, ts=now, txs=len(block.get_transactions()))
                    if t_block.newblock_timestamp:
                        total = now - t_block.newblock_timestamp
                        self.newblock_processing_times.append(total)
                        avg = statistics.mean(self.newblock_processing_times)
                        med = statistics.median(self.newblock_processing_times)
                        max_ = max(self.newblock_processing_times)
                        min_ = min(self.newblock_processing_times)
                        log.debug('processing time', last=total, avg=avg, max=max_, min=min_,
                                  median=med)
                else:
                    log.warn('could not add', block=block)
                self.block_queue.get()  # remove block from queue (we peeked only)
                gevent.sleep(0.001)
        finally:
            self.add_blocks_lock = False
            self.add_transaction_lock.release()

    def gpsec(self, gas_spent=0, elapsed=0):
        if gas_spent:
            self.processed_gas += gas_spent
            self.processed_elapsed += elapsed
        return int(self.processed_gas / (0.001 + self.processed_elapsed))

    def broadcast_newblock(self, block, chain_difficulty=None, origin=None):
        if not chain_difficulty:
            assert block.hash in self.chain
            chain_difficulty = block.chain_difficulty()
        assert isinstance(block, (eth_protocol.TransientBlock, Block))
        if self.broadcast_filter.update(block.header.hash):
            log.debug('broadcasting newblock', origin=origin)
            bcast = self.app.services.peermanager.broadcast
            bcast(eth_protocol.ETHProtocol, 'newblock', args=(block, chain_difficulty),
                  exclude_peers=[origin.peer] if origin else [])
        else:
            log.debug('already broadcasted block')

    def broadcast_transaction(self, tx, origin=None):
        assert isinstance(tx, Transaction)
        if self.broadcast_filter.update(tx.hash):
            log.debug('broadcasting tx', origin=origin)
            bcast = self.app.services.peermanager.broadcast
            bcast(eth_protocol.ETHProtocol, 'transactions', args=(tx,),
                  exclude_peers=[origin.peer] if origin else [])
        else:
            log.debug('already broadcasted tx')

    # wire protocol receivers ###########

    def on_wire_protocol_start(self, proto):
        log.debug('----------------------------------')
        log.debug('on_wire_protocol_start', proto=proto)
        assert isinstance(proto, self.wire_protocol)
        # register callbacks
        proto.receive_status_callbacks.append(self.on_receive_status)
        proto.receive_transactions_callbacks.append(self.on_receive_transactions)
        proto.receive_getblockhashes_callbacks.append(self.on_receive_getblockhashes)
        proto.receive_blockhashes_callbacks.append(self.on_receive_blockhashes)
        proto.receive_getblocks_callbacks.append(self.on_receive_getblocks)
        proto.receive_blocks_callbacks.append(self.on_receive_blocks)
        proto.receive_newblock_callbacks.append(self.on_receive_newblock)
        proto.receive_newblockhashes_callbacks.append(self.on_newblockhashes)

        # send status
        head = self.chain.head
        proto.send_status(chain_difficulty=head.chain_difficulty(), chain_head_hash=head.hash,
                          genesis_hash=self.chain.genesis.hash)

    def on_wire_protocol_stop(self, proto):
        assert isinstance(proto, self.wire_protocol)
        log.debug('----------------------------------')
        log.debug('on_wire_protocol_stop', proto=proto)

    def on_receive_status(self, proto, eth_version, network_id, chain_difficulty, chain_head_hash,
                          genesis_hash):
        log.debug('----------------------------------')
        log.debug('status received', proto=proto, eth_version=eth_version)
        assert eth_version == proto.version, (eth_version, proto.version)
        if network_id != self.config['eth'].get('network_id', proto.network_id):
            log.warn("invalid network id", remote_network_id=network_id,
                     expected_network_id=self.config['eth'].get('network_id', proto.network_id))
            raise eth_protocol.ETHProtocolError('wrong network_id')

        # check genesis
        if genesis_hash != self.chain.genesis.hash:
            log.warn("invalid genesis hash", remote_id=proto, genesis=genesis_hash.encode('hex'))
            raise eth_protocol.ETHProtocolError('wrong genesis block')

        # request chain
        self.synchronizer.receive_status(proto, chain_head_hash, chain_difficulty)

        # send transactions
        transactions = self.chain.get_transactions()
        if transactions:
            log.debug("sending transactions", remote_id=proto)
            proto.send_transactions(*transactions)

    # transactions

    def on_receive_transactions(self, proto, transactions):
        "receives rlp.decoded serialized"
        log.debug('----------------------------------')
        log.debug('remote_transactions_received', count=len(transactions), remote_id=proto)
        for tx in transactions:
            self.add_transaction(tx, origin=proto)

    # blockhashes ###########

    def on_newblockhashes(self, proto, newblockhashes):
        """
        Message sent when only block hashes (not the full block) are propagated;
        chances are high that we will receive the full newblock as well.
        """
        log.debug('----------------------------------')
        log.debug("recv newnewblockhashes", num=len(newblockhashes), remote_id=proto)
        self.synchronizer.receive_newblockhashes(proto, newblockhashes)

    def on_receive_getblockhashes(self, proto, child_block_hash, count):
        log.debug('----------------------------------')
        log.debug("handle_get_blockhashes", count=count, block_hash=encode_hex(child_block_hash))
        max_hashes = min(count, self.wire_protocol.max_getblockhashes_count)
        found = []
        if child_block_hash not in self.chain:
            log.debug("unknown block")
            proto.send_blockhashes(*[])
            return

        last = child_block_hash
        while len(found) < max_hashes:
            try:
                last = rlp.decode_lazy(self.chain.db.get(last))[0][0]  # [head][prevhash]
            except KeyError:
                # this can happen if we started a chain download, which did not complete
                # should not happen if the hash is part of the canonical chain
                log.warn('KeyError in getblockhashes', hash=last)
                break
            if last:
                found.append(last)
            else:
                break

        log.debug("sending: found block_hashes", count=len(found))
        proto.send_blockhashes(*found)

    def on_receive_blockhashes(self, proto, blockhashes):
        log.debug('----------------------------------')
        if blockhashes:
            log.debug("on_receive_blockhashes", count=len(blockhashes), remote_id=proto,
                      first=encode_hex(blockhashes[0]), last=encode_hex(blockhashes[-1]))
        else:
            log.debug("recv 0 remote block hashes, signifying genesis block")
        self.synchronizer.receive_blockhashes(proto, blockhashes)

    # blocks ################

    def on_receive_getblocks(self, proto, blockhashes):
        log.debug('----------------------------------')
        log.debug("on_receive_getblocks", count=len(blockhashes))
        found = []
        for bh in blockhashes[:self.wire_protocol.max_getblocks_count]:
            try:
                found.append(self.chain.db.get(bh))
            except KeyError:
                log.debug("unknown block requested", block_hash=encode_hex(bh))
        if found:
            log.debug("found", count=len(found))
            proto.send_blocks(*found)

    def on_receive_blocks(self, proto, transient_blocks):
        log.debug('----------------------------------')
        blk_number = max(x.header.number for x in transient_blocks) if transient_blocks else 0
        log.debug("recv blocks", count=len(transient_blocks), remote_id=proto,
                  highest_number=blk_number)
        if transient_blocks:
            self.synchronizer.receive_blocks(proto, transient_blocks)

    def on_receive_newblock(self, proto, block, chain_difficulty):
        log.debug('----------------------------------')
        log.debug("recv newblock", block=block, remote_id=proto)
        self.synchronizer.receive_newblock(proto, block, chain_difficulty)
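on_receive_getblockhashes walks backwards through parent hashes by reading header[0] (the prevhash) out of each block's RLP, as the [head][prevhash] comment notes. The same traversal over a plain dict mapping hash -> prevhash, as a standalone sketch:

def collect_blockhashes(store, child_hash, max_hashes):
    found = []
    last = child_hash
    while len(found) < max_hashes:
        try:
            last = store[last]  # parent hash of the current block
        except KeyError:
            break               # incomplete chain download
        if last:
            found.append(last)
        else:
            break               # an empty prevhash marks genesis
    return found

store = {'c': 'b', 'b': 'a', 'a': ''}
print(collect_blockhashes(store, 'c', 10))  # ['b', 'a']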
Example #41
class ChainManager(StoppableLoopThread):

    """
    Manages the chain and requests to it.
    """

    # initialized after configure:
    genesis = None
    index = None
    miner = None
    blockchain = None
    synchronizer = None
    config = None

    def __init__(self):
        super(ChainManager, self).__init__()

    def configure(self, config, genesis=None, db=None):
        self.config = config
        if not db:
            db_path = utils.db_path(config.get('misc', 'data_dir'))
            log.info('opening chain', db_path=db_path)
            db = DB(db_path)
        self.blockchain = db
        self.index = Index(db)
        if genesis:
            self._initialize_blockchain(genesis)
        log.debug('chain @', head_hash=self.head)
        self.genesis = blocks.genesis(db=db)
        log.debug('got genesis', genesis_hash=self.genesis)
        self.new_miner()
        self.synchronizer = Synchronizer(self)

    def _initialize_blockchain(self, genesis=None):
        log.info('Initializing new chain')
        if not genesis:
            genesis = blocks.genesis(self.blockchain)
            log.info('new genesis', genesis_hash=genesis)
            self.index.add_block(genesis)
        self._store_block(genesis)
        assert genesis == blocks.get_block(self.blockchain, genesis.hash)
        self._update_head(genesis)
        assert genesis.hash in self

    @property
    def head(self):
        if not self.config:
            self.configure(config.read_config())
        if not self.blockchain or 'HEAD' not in self.blockchain:
            self._initialize_blockchain()
        ptr = self.blockchain.get('HEAD')
        return blocks.get_block(self.blockchain, ptr)

    def _update_head(self, block):
        if not block.is_genesis():
            assert self.head.chain_difficulty() < block.chain_difficulty()
            if block.get_parent() != self.head:
                log.debug('New Head is on a different branch', head_hash=block, old_head_hash=self.head)
        self.blockchain.put('HEAD', block.hash)
        self.index.update_blocknumbers(self.head)
        self.new_miner()  # reset mining

    def get(self, blockhash):
        assert isinstance(blockhash, str)
        assert len(blockhash) == 32
        return blocks.get_block(self.blockchain, blockhash)

    def has_block(self, blockhash):
        assert isinstance(blockhash, str)
        assert len(blockhash) == 32
        return blockhash in self.blockchain

    def __contains__(self, blockhash):
        return self.has_block(blockhash)

    def _store_block(self, block):
        self.blockchain.put(block.hash, block.serialize())

    def commit(self):
        self.blockchain.commit()

    def loop_body(self):
        ts = time.time()
        pct_cpu = self.config.getint('misc', 'mining')
        if pct_cpu > 0:
            self.mine()
            delay = (time.time() - ts) * (100. / pct_cpu - 1)
            assert delay >= 0
            time.sleep(min(delay, 1.))
        else:
            time.sleep(.01)

    def new_miner(self):
        "initialize a new miner whenever HEAD is updated"
        # prepare uncles
        uncles = set(self.get_uncles(self.head))
        ineligible = set()  # hashes
        blk = self.head
        for i in range(8):
            for u in blk.uncles:  # assuming uncle headers
                u = utils.sha3(rlp.encode(u))
                if u in self:
                    uncles.discard(self.get(u))
            if blk.has_parent():
                blk = blk.get_parent()

        miner = Miner(self.head, uncles, self.config.get('wallet', 'coinbase'))
        if self.miner:
            for tx in self.miner.get_transactions():
                miner.add_transaction(tx)
        self.miner = miner

    def mine(self):
        with self.lock:
            block = self.miner.mine()
            if block:
                # create new block
                if not self.add_block(block, forward=True):
                    log.debug("newly mined block is invalid!?", block_hash=block)
                    self.new_miner()

    def receive_chain(self, transient_blocks, peer=None):
        with self.lock:
            old_head = self.head
            # blocks are assumed to arrive in chain order, oldest first
            transient_blocks.sort(key=attrgetter('number'))
            assert transient_blocks[0].number <= transient_blocks[-1].number

            # notify syncer
            self.synchronizer.received_blocks(peer, transient_blocks)

            for t_block in transient_blocks:  # oldest to newest
                log.debug('Checking PoW', block_hash=t_block)
                if not blocks.check_header_pow(t_block.header_args):
                    log.debug('Invalid PoW', block_hash=t_block)
                    continue
                log.debug('Deserializing', block_hash=t_block)
                try:
                    block = blocks.Block.deserialize(self.blockchain, t_block.rlpdata)
                except processblock.InvalidTransaction as e:
                    # FIXME there might be another exception in
                    # blocks.deserializeChild when replaying transactions
                    # if this fails, we need to rewind state
                    log.debug('invalid transaction', block_hash=t_block, error=e)
                    # stop current syncing of this chain and skip the child blocks
                    self.synchronizer.stop_synchronization(peer)
                    return
                except blocks.UnknownParentException:
                    if t_block.prevhash == blocks.GENESIS_PREVHASH:
                        log.debug('Rec Incompatible Genesis', block_hash=t_block)
                        if peer:
                            peer.send_Disconnect(reason='Wrong genesis block')
                    else:  # should be a single newly mined block
                        assert t_block.prevhash not in self
                        assert t_block.prevhash != self.genesis.hash
                        log.debug('unknown parent', block_hash=t_block,
                                  parent_hash=t_block.prevhash.encode('hex'), remote_id=peer)
                        if len(transient_blocks) != 1:
                            # strange situation here.
                            # we receive more than 1 block, so it's not a single newly mined one
                            # sync/network/... failed to add the needed parent at some point
                            # well, this happens whenever we can't validate a block!
                            # we should disconnect!
                            log.warn(
                                'blocks received, but unknown parent.', num=len(transient_blocks))
                        if peer:
                            # request chain for newest known hash
                            self.synchronizer.synchronize_unknown_block(
                                peer, transient_blocks[-1].hash)
                    break
                if block.hash in self:
                    log.debug('known', block_hash=block)
                else:
                    assert block.has_parent()
                    # assume single block is newly mined block
                    forward = len(transient_blocks) == 1
                    success = self.add_block(block, forward=forward)
                    if success:
                        log.debug('added', block_hash=block)

    def add_block(self, block, forward=False):
        "returns True if block was added successfully"
        _log = log.bind(block_hash=block)
        # make sure we know the parent
        if not block.has_parent() and not block.is_genesis():
            _log.debug('missing parent')
            return False

        if not block.validate_uncles():
            _log.debug('invalid uncles')
            return False

        # check PoW and forward asap in order to avoid stale blocks
        if not len(block.nonce) == 32:
            _log.debug('nonce not set')
            return False
        elif not block.check_proof_of_work(block.nonce) and\
                not block.is_genesis():
            _log.debug('invalid nonce')
            return False
        # Forward block w/ valid PoW asap (if not syncing)
        # FIXME: filter out the peer from which the block was received
        if forward:
            _log.debug("broadcasting new")
            signals.broadcast_new_block.send(sender=None, block=block)

        if block.has_parent():
            try:
                processblock.verify(block, block.get_parent())
            except processblock.VerificationFailed as e:
                _log.critical('VERIFICATION FAILED', error=e)
                f = os.path.join(utils.data_dir, 'badblock.log')
                with open(f, 'w') as badblock_log:  # close the handle properly
                    badblock_log.write(str(block.hex_serialize()))
                return False

        if block.number < self.head.number:
            _log.debug("older than head", head_hash=self.head)
            # Q: Should we have any limitations on adding blocks?

        self.index.add_block(block)
        self._store_block(block)

        # set to head if this makes the longest chain w/ most work for that number
        if block.chain_difficulty() > self.head.chain_difficulty():
            _log.debug('new head')
            self._update_head(block)
        elif block.number > self.head.number:
            _log.warn('has higher blk number than head but lower chain_difficulty',
                      head_hash=self.head, block_difficulty=block.chain_difficulty(),
                      head_difficulty=self.head.chain_difficulty())
        self.commit()  # batch commits all changes that came with the new block
        return True

    def get_children(self, block):
        return [self.get(c) for c in self.index.get_children(block.hash)]

    def get_uncles(self, block):
        if not block.has_parent():
            return []
        parent = block.get_parent()
        o = []
        i = 0
        while parent.has_parent() and i < 6:
            grandparent = parent.get_parent()
            o.extend([u for u in self.get_children(grandparent) if u != parent])
            parent = grandparent
            i += 1
        return o

    def add_transaction(self, transaction):
        _log = log.bind(tx_hash=transaction)
        _log.debug("add transaction")
        with self.lock:
            res = self.miner.add_transaction(transaction)
            if res:
                _log.debug("broadcasting valid")
                signals.send_local_transactions.send(
                    sender=None, transactions=[transaction])
            return res

    def get_transactions(self):
        log.debug("get_transactions called")
        return self.miner.get_transactions()

    def get_chain(self, start='', count=NUM_BLOCKS_PER_REQUEST):
        "return 'count' blocks starting from head or start"
        log.debug("get_chain", start=start.encode('hex'), count=count)
        blocks = []
        block = self.head
        if start:
            if start in self.index.db:
                return []
            block = self.get(start)
            if not self.in_main_branch(block):
                return []
        for i in range(count):
            blocks.append(block)
            if block.is_genesis():
                break
            block = block.get_parent()
        return blocks

    def in_main_branch(self, block):
        try:
            return block.hash == self.index.get_block_by_number(block.number)
        except KeyError:
            return False

    def get_descendants(self, block, count=1):
        log.debug("get_descendants", block_hash=block)
        assert block.hash in self
        block_numbers = range(block.number + 1, min(self.head.number, block.number + count))
        return [self.get(self.index.get_block_by_number(n)) for n in block_numbers]
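
One detail worth calling out in loop_body above: after mining for t seconds with a target of pct_cpu percent, sleeping t * (100 / pct_cpu - 1) makes the mining phase occupy t / (t + delay) = pct_cpu / 100 of wall time. A quick check of that arithmetic, with mining_delay extracted here purely for the sketch:

def mining_delay(elapsed, pct_cpu):
    # the duty-cycle throttle used by loop_body
    return elapsed * (100. / pct_cpu - 1)

elapsed = 0.2                        # seconds spent in self.mine()
delay = mining_delay(elapsed, 25)    # target: 25% CPU
assert abs(elapsed / (elapsed + delay) - 0.25) < 1e-9
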
Example #42
class ChainManager(StoppableLoopThread):
    """
    Manages the chain and requests to it.
    """

    # initialized after configure:
    genesis = None
    index = None
    miner = None
    blockchain = None
    synchronizer = None

    def __init__(self):
        super(ChainManager, self).__init__()

    def configure(self, config, genesis=None):
        self.config = config
        logger.info('Opening chain @ %s', utils.get_db_path())
        db = self.blockchain = DB(utils.get_db_path())
        self.index = Index(db)
        if genesis:
            self._initialize_blockchain(genesis)
        logger.debug('Chain @ #%d %s', self.head.number, self.head.hex_hash())
        self.genesis = blocks.CachedBlock.create_cached(blocks.genesis())
        self.new_miner()
        self.synchronizer = Synchronizer(self)

    @property
    def head(self):
        if 'HEAD' not in self.blockchain:
            self._initialize_blockchain()
        ptr = self.blockchain.get('HEAD')
        return blocks.get_block(ptr)

    def _update_head(self, block):
        if not block.is_genesis():
            assert self.head.chain_difficulty() < block.chain_difficulty()
            if block.get_parent() != self.head:
                logger.debug(
                    'New Head %r is on a different branch. Old was:%r', block,
                    self.head)
        self.blockchain.put('HEAD', block.hash)
        self.index.update_blocknumbers(self.head)
        self.new_miner()  # reset mining

    def get(self, blockhash):
        assert isinstance(blockhash, str)
        assert len(blockhash) == 32
        return blocks.get_block(blockhash)

    def has_block(self, blockhash):
        assert isinstance(blockhash, str)
        assert len(blockhash) == 32
        return blockhash in self.blockchain

    def __contains__(self, blockhash):
        return self.has_block(blockhash)

    def _store_block(self, block):
        self.blockchain.put(block.hash, block.serialize())

    def commit(self):
        self.blockchain.commit()

    def _initialize_blockchain(self, genesis=None):
        logger.info('Initializing new chain @ %s', utils.get_db_path())
        if not genesis:
            genesis = blocks.genesis()
            self.index.add_block(genesis)
        self._store_block(genesis)
        self._update_head(genesis)
        assert genesis.hash in self

    def loop_body(self):
        ts = time.time()
        pct_cpu = self.config.getint('misc', 'mining')
        if pct_cpu > 0:
            self.mine()
            delay = (time.time() - ts) * (100. / pct_cpu - 1)
            if delay < 0:
                logger.warn('delay %r<0!?', delay)
                delay = 1
            assert delay >= 0
            time.sleep(min(delay, 1.))
        else:
            time.sleep(.01)

    def new_miner(self):
        "initialize a new miner whenever HEAD is updated"
        # prepare uncles
        uncles = set(self.get_uncles(self.head))
        # logger.debug('%d uncles for next block %r', len(uncles), uncles)
        ineligible = set()  # hashes
        blk = self.head
        for i in range(8):
            for u in blk.uncles:  # assuming uncle headers
                u = utils.sha3(rlp.encode(u))
                if u in self:
                    # logger.debug('ineligible uncle %r', u.encode('hex'))
                    uncles.discard(self.get(u))
            if blk.has_parent():
                blk = blk.get_parent()
        # logger.debug('%d uncles after filtering %r', len(uncles), uncles)

        miner = Miner(self.head, uncles, self.config.get('wallet', 'coinbase'))
        if self.miner:
            for tx in self.miner.get_transactions():
                miner.add_transaction(tx)
        self.miner = miner

    def mine(self):
        with self.lock:
            block = self.miner.mine()
            if block:
                # create new block
                if self.add_block(block):
                    logger.debug("broadcasting new %r" % block)
                    signals.broadcast_new_block.send(sender=None, block=block)
                else:
                    self.new_miner()

    def receive_chain(self, transient_blocks, peer=None):
        with self.lock:
            old_head = self.head
            # blocks are assumed to arrive in chain order, oldest first
            transient_blocks.sort(key=attrgetter('number'))
            assert transient_blocks[0].number <= transient_blocks[-1].number

            # notify syncer
            self.synchronizer.received_blocks(peer, transient_blocks)

            for t_block in transient_blocks:  # oldest to newest
                logger.debug('Deserializing %r', t_block)
                #logger.debug(t_block.rlpdata.encode('hex'))
                try:
                    block = blocks.Block.deserialize(t_block.rlpdata)
                except processblock.InvalidTransaction as e:
                    # FIXME there might be another exception in
                    # blocks.deserializeChild when replaying transactions
                    # if this fails, we need to rewind state
                    logger.debug('%r w/ invalid Transaction %r', t_block, e)
                    # stop current syncing of this chain and skip the child blocks
                    self.synchronizer.stop_synchronization(peer)
                    return
                except blocks.UnknownParentException:
                    if t_block.prevhash == blocks.GENESIS_PREVHASH:
                        logger.debug('Rec Incompatible Genesis %r', t_block)
                        if peer:
                            peer.send_Disconnect(reason='Wrong genesis block')
                    else:  # should be a single newly mined block
                        assert t_block.prevhash not in self
                        assert t_block.prevhash != blocks.genesis().hash
                        logger.debug('%s with unknown parent %s, peer:%r',
                                     t_block, t_block.prevhash.encode('hex'),
                                     peer)
                        if len(transient_blocks) != 1:
                            # strange situation here.
                            # we receive more than 1 block, so it's not a single newly mined one
                            # sync/network/... failed to add the needed parent at some point
                            # well, this happens whenever we can't validate a block!
                            # we should disconnect!
                            logger.warn('%s received, but unknown parent.',
                                        len(transient_blocks))
                        if peer:
                            # request chain for newest known hash
                            self.synchronizer.synchronize_unknown_block(
                                peer, transient_blocks[-1].hash)
                    break
                if block.hash in self:
                    logger.debug('Known %r', block)
                else:
                    assert block.has_parent()
                    success = self.add_block(block)
                    if success:
                        logger.debug('Added %r', block)

    def add_block(self, block):
        "returns True if block was added successfully"
        # make sure we know the parent
        if not block.has_parent() and not block.is_genesis():
            logger.debug('Missing parent for block %r', block)
            return False

        if not block.validate_uncles():
            logger.debug('Invalid uncles %r', block)
            return False

        # check PoW and forward asap in order to avoid stale blocks
        if not len(block.nonce) == 32:
            logger.debug('Nonce not set %r', block)
            return False
        elif not block.check_proof_of_work(block.nonce) and\
                not block.is_genesis():
            logger.debug('Invalid nonce %r', block)
            return False

        # FIXME: Forward blocks w/ valid PoW asap
        if block.has_parent():
            try:
                #logger.debug('verifying: %s', block)
                #logger.debug('GETTING ACCOUNT FOR COINBASE:')
                #acct = block.get_acct(block.coinbase)
                #logger.debug('GOT ACCOUNT FOR COINBASE: %r', acct)
                processblock.verify(block, block.get_parent())
            except processblock.VerificationFailed as e:
                logger.debug('### VERIFICATION FAILED ### %r', e)
                f = os.path.join(utils.data_dir, 'badblock.log')
                with open(f, 'w') as badblock_log:  # close the handle properly
                    badblock_log.write(str(block.hex_serialize()))
                print block.hex_serialize()
                return False

        if block.number < self.head.number:
            logger.debug("%r is older than head %r", block, self.head)
            # Q: Should we have any limitations on adding blocks?

        self.index.add_block(block)
        self._store_block(block)

        # set to head if this makes the longest chain w/ most work for that number
        #logger.debug('Head: %r @%s  New:%r @%d', self.head, self.head.chain_difficulty(), block, block.chain_difficulty())
        if block.chain_difficulty() > self.head.chain_difficulty():
            logger.debug('New Head %r', block)
            self._update_head(block)
        elif block.number > self.head.number:
            logger.warn(
                '%r has higher blk number than head %r but lower chain_difficulty of %d vs %d',
                block, self.head, block.chain_difficulty(),
                self.head.chain_difficulty())
        self.commit()  # batch commits all changes that came with the new block

        return True

    def get_children(self, block):
        return [self.get(c) for c in self.index.get_children(block.hash)]

    def get_uncles(self, block):
        if not block.has_parent():
            return []
        parent = block.get_parent()
        o = []
        i = 0
        while parent.has_parent() and i < 6:
            grandparent = parent.get_parent()
            o.extend(
                [u for u in self.get_children(grandparent) if u != parent])
            parent = grandparent
            i += 1
        return o

    def add_transaction(self, transaction):
        logger.debug("add transaction %r" % transaction)
        with self.lock:
            res = self.miner.add_transaction(transaction)
            if res:
                logger.debug("broadcasting valid %r" % transaction)
                signals.send_local_transactions.send(
                    sender=None, transactions=[transaction])
            return res

    def get_transactions(self):
        logger.debug("get_transactions called")
        return self.miner.get_transactions()

    def get_chain(self, start='', count=NUM_BLOCKS_PER_REQUEST):
        "return 'count' blocks starting from head or start"
        logger.debug("get_chain: start:%s count:%d", start.encode('hex'), count)
        blocks = []
        block = self.head
        if start:
            if start in self.index.db:
                return []
            block = self.get(start)
            if not self.in_main_branch(block):
                return []
        for i in range(count):
            blocks.append(block)
            if block.is_genesis():
                break
            block = block.get_parent()
        return blocks

    def in_main_branch(self, block):
        try:
            return block.hash == self.index.get_block_by_number(block.number)
        except KeyError:
            return False

    def get_descendants(self, block, count=1):
        logger.debug("get_descendants: %r ", block)
        assert block.hash in self
        block_numbers = range(block.number + 1,
                              min(self.head.number, block.number + count))
        return [
            self.get(self.index.get_block_by_number(n)) for n in block_numbers
        ]
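
The get_uncles traversal above collects, for up to six generations, each ancestor's siblings (the other children of its parent). A toy model of the same walk over plain dicts; the parent/children maps below are invented for the sketch:

parent = {'b3': 'b2', 'b2': 'b1', 'b1': 'g', 'u2': 'b1', 'u1': 'g'}
children = {'g': ['b1', 'u1'], 'b1': ['b2', 'u2'], 'b2': ['b3']}

def toy_get_uncles(block, depth=6):
    # mirrors get_uncles: walk up the ancestry, collect each ancestor's siblings
    if block not in parent:
        return []
    p, o, i = parent[block], [], 0
    while p in parent and i < depth:
        gp = parent[p]
        o.extend(c for c in children.get(gp, []) if c != p)
        p = gp
        i += 1
    return o

assert toy_get_uncles('b3') == ['u2', 'u1']
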
Example #43
class ChainManager(StoppableLoopThread):
    """
    Manages the chain and requests to it.
    """

    # initialized after configure:
    genesis = None
    index = None
    miner = None
    blockchain = None
    synchronizer = None
    config = None

    def __init__(self):
        super(ChainManager, self).__init__()

    def configure(self, config, genesis=None, db=None):
        self.config = config
        if not db:
            db_path = utils.db_path(config.get('misc', 'data_dir'))
            log.info('opening chain', db_path=db_path)
            db = DB(db_path)  # assigned to self.blockchain below
        self.blockchain = db
        self.index = Index(db)
        if genesis:
            self._initialize_blockchain(genesis)
        log.debug('chain @', head_hash=self.head)
        self.genesis = blocks.genesis(db=db)
        log.debug('got genesis', genesis_hash=self.genesis)
        self.new_miner()
        self.synchronizer = Synchronizer(self)

    def _initialize_blockchain(self, genesis=None):
        log.info('Initializing new chain')
        if not genesis:
            genesis = blocks.genesis(self.blockchain)
            log.info('new genesis', genesis_hash=genesis)
            self.index.add_block(genesis)
        self._store_block(genesis)
        assert genesis == blocks.get_block(self.blockchain, genesis.hash)
        self._update_head(genesis)
        assert genesis.hash in self

    @property
    def head(self):
        if not self.config:
            self.configure(config.read_config())
        if not self.blockchain or 'HEAD' not in self.blockchain:
            self._initialize_blockchain()
        ptr = self.blockchain.get('HEAD')
        return blocks.get_block(self.blockchain, ptr)

    def _update_head(self, block):
        if not block.is_genesis():
            assert self.head.chain_difficulty() < block.chain_difficulty()
            if block.get_parent() != self.head:
                log.debug('New Head is on a different branch',
                          head_hash=block,
                          old_head_hash=self.head)
        self.blockchain.put('HEAD', block.hash)
        self.index.update_blocknumbers(self.head)
        self.new_miner()  # reset mining

    def get(self, blockhash):
        assert isinstance(blockhash, str)
        assert len(blockhash) == 32
        return blocks.get_block(self.blockchain, blockhash)

    def has_block(self, blockhash):
        assert isinstance(blockhash, str)
        assert len(blockhash) == 32
        return blockhash in self.blockchain

    def __contains__(self, blockhash):
        return self.has_block(blockhash)

    def _store_block(self, block):
        self.blockchain.put(block.hash, block.serialize())

    def commit(self):
        self.blockchain.commit()

    def loop_body(self):
        ts = time.time()
        pct_cpu = self.config.getint('misc', 'mining')
        if pct_cpu > 0:
            self.mine()
            delay = (time.time() - ts) * (100. / pct_cpu - 1)
            assert delay >= 0
            time.sleep(min(delay, 1.))
        else:
            time.sleep(.01)

    def new_miner(self):
        "initialize a new miner whenever HEAD is updated"
        # prepare uncles
        uncles = set(self.get_uncles(self.head))
        ineligible = set()  # hashes
        blk = self.head
        for i in range(8):
            for u in blk.uncles:  # assuming uncle headers
                u = utils.sha3(rlp.encode(u))
                if u in self:
                    uncles.discard(self.get(u))
            if blk.has_parent():
                blk = blk.get_parent()

        miner = Miner(self.head, uncles, self.config.get('wallet', 'coinbase'))
        if self.miner:
            for tx in self.miner.get_transactions():
                miner.add_transaction(tx)
        self.miner = miner

    def mine(self):
        with self.lock:
            block = self.miner.mine()
            if block:
                # create new block
                if not self.add_block(block, forward=True):
                    log.debug("newly mined block is invalid!?",
                              block_hash=block)
                    self.new_miner()

    def receive_chain(self, transient_blocks, peer=None):
        with self.lock:
            old_head = self.head
            # blocks are assumed to arrive in chain order, oldest first
            transient_blocks.sort(key=attrgetter('number'))
            assert transient_blocks[0].number <= transient_blocks[-1].number

            # notify syncer
            self.synchronizer.received_blocks(peer, transient_blocks)

            for t_block in transient_blocks:  # oldest to newest
                log.debug('Checking PoW', block_hash=t_block)
                if not blocks.check_header_pow(t_block.header_args):
                    log.debug('Invalid PoW', block_hash=t_block)
                    continue
                log.debug('Deserializing', block_hash=t_block)
                try:
                    block = blocks.Block.deserialize(self.blockchain,
                                                     t_block.rlpdata)
                except processblock.InvalidTransaction as e:
                    # FIXME there might be another exception in
                    # blocks.deserializeChild when replaying transactions
                    # if this fails, we need to rewind state
                    log.debug('invalid transaction',
                              block_hash=t_block,
                              error=e)
                    # stop current syncing of this chain and skip the child blocks
                    self.synchronizer.stop_synchronization(peer)
                    return
                except blocks.UnknownParentException:
                    if t_block.prevhash == blocks.GENESIS_PREVHASH:
                        log.debug('Rec Incompatible Genesis',
                                  block_hash=t_block)
                        if peer:
                            peer.send_Disconnect(reason='Wrong genesis block')
                    else:  # should be a single newly mined block
                        assert t_block.prevhash not in self
                        assert t_block.prevhash != self.genesis.hash
                        log.debug('unknown parent',
                                  block_hash=t_block,
                                  parent_hash=t_block.prevhash.encode('hex'),
                                  remote_id=peer)
                        if len(transient_blocks) != 1:
                            # strange situation here.
                            # we receive more than 1 block, so it's not a single newly mined one
                            # sync/network/... failed to add the needed parent at some point
                            # well, this happens whenever we can't validate a block!
                            # we should disconnect!
                            log.warn('blocks received, but unknown parent.',
                                     num=len(transient_blocks))
                        if peer:
                            # request chain for newest known hash
                            self.synchronizer.synchronize_unknown_block(
                                peer, transient_blocks[-1].hash)
                    break
                if block.hash in self:
                    log.debug('known', block_hash=block)
                else:
                    assert block.has_parent()
                    # assume single block is newly mined block
                    forward = len(transient_blocks) == 1
                    success = self.add_block(block, forward=forward)
                    if success:
                        log.debug('added', block_hash=block)

    def add_block(self, block, forward=False):
        "returns True if block was added successfully"
        _log = log.bind(block_hash=block)
        # make sure we know the parent
        if not block.has_parent() and not block.is_genesis():
            _log.debug('missing parent')
            return False

        if not block.validate_uncles():
            _log.debug('invalid uncles')
            return False

        # check PoW and forward asap in order to avoid stale blocks
        if not len(block.nonce) == 32:
            _log.debug('nonce not set')
            return False
        elif not block.check_proof_of_work(block.nonce) and\
                not block.is_genesis():
            _log.debug('invalid nonce')
            return False
        # Forward block w/ valid PoW asap (if not syncing)
        # FIXME: filter out the peer from which the block was received
        if forward:
            _log.debug("broadcasting new")
            signals.broadcast_new_block.send(sender=None, block=block)

        if block.has_parent():
            try:
                processblock.verify(block, block.get_parent())
            except processblock.VerificationFailed as e:
                _log.critical('VERIFICATION FAILED', error=e)
                f = os.path.join(utils.data_dir, 'badblock.log')
                with open(f, 'w') as badblock_log:  # close the handle properly
                    badblock_log.write(str(block.hex_serialize()))
                return False

        if block.number < self.head.number:
            _log.debug("older than head", head_hash=self.head)
            # Q: Should we have any limitations on adding blocks?

        self.index.add_block(block)
        self._store_block(block)

        # set to head if this makes the longest chain w/ most work for that number
        if block.chain_difficulty() > self.head.chain_difficulty():
            _log.debug('new head')
            self._update_head(block)
        elif block.number > self.head.number:
            _log.warn(
                'has higher blk number than head but lower chain_difficulty',
                head_hash=self.head,
                block_difficulty=block.chain_difficulty(),
                head_difficulty=self.head.chain_difficulty())
        self.commit()  # batch commits all changes that came with the new block
        return True

    def get_children(self, block):
        return [self.get(c) for c in self.index.get_children(block.hash)]

    def get_uncles(self, block):
        if not block.has_parent():
            return []
        parent = block.get_parent()
        o = []
        i = 0
        while parent.has_parent() and i < 6:
            grandparent = parent.get_parent()
            o.extend(
                [u for u in self.get_children(grandparent) if u != parent])
            parent = grandparent
            i += 1
        return o

    def add_transaction(self, transaction):
        _log = log.bind(tx_hash=transaction)
        _log.debug("add transaction")
        with self.lock:
            res = self.miner.add_transaction(transaction)
            if res:
                _log.debug("broadcasting valid")
                signals.send_local_transactions.send(
                    sender=None, transactions=[transaction])
            return res

    def get_transactions(self):
        log.debug("get_transactions called")
        return self.miner.get_transactions()

    def get_chain(self, start='', count=NUM_BLOCKS_PER_REQUEST):
        "return 'count' blocks starting from head or start"
        log.debug("get_chain", start=start.encode('hex'), count=count)
        blocks = []
        block = self.head
        if start:
            if start in self.index.db:
                return []
            block = self.get(start)
            if not self.in_main_branch(block):
                return []
        for i in range(count):
            blocks.append(block)
            if block.is_genesis():
                break
            block = block.get_parent()
        return blocks

    def in_main_branch(self, block):
        try:
            return block.hash == self.index.get_block_by_number(block.number)
        except KeyError:
            return False

    def get_descendants(self, block, count=1):
        log.debug("get_descendants", block_hash=block)
        assert block.hash in self
        block_numbers = range(block.number + 1,
                              min(self.head.number, block.number + count))
        return [
            self.get(self.index.get_block_by_number(n)) for n in block_numbers
        ]
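
Note that head selection in add_block compares total chain difficulty, not block height: a higher-numbered block on a weaker branch is stored but does not become head (the elif branch only warns). The decision rule reduced to a sketch, with B as a stand-in record type:

import collections

B = collections.namedtuple('B', 'number difficulty')

def choose_head(head, candidate):
    # total chain difficulty wins, not block number
    return candidate if candidate.difficulty > head.difficulty else head

head = B(number=10, difficulty=1000)
taller_but_weaker = B(number=11, difficulty=900)
assert choose_head(head, taller_but_weaker) is head
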
Example #44
class Clients:
    __JWT = JWTController().__get_instance__
    __storage = StorageController().__get_instance__
    __sync = Synchronizer().__get_instance__

    def add_user(self):
        data = request.get_json()
        payload = data['payload']
        key = json.loads(self.__JWT.get_global_key)

        try:
            deserialized_data = self.__JWT.verify_token(payload, key)
        except InvalidJWSSignature as signErr:
            logging.error(f"Error in the verification {signErr}")
            return jsonify({
                "type": "error",
                "payload": self.__JWT.generate_token({
                    "type": "error",
                    "msg": "Error en la validación de la firma digital",
                    "system_msg": signErr.args
                }, key['k']),
                "time": time.time()
            }), 401
        except Exception as err:
            logging.error(err)
            return jsonify({
                "type": "error",
                "payload": self.__JWT.generate_token({
                    "type": "Unrecognized error",
                    "msg": err.args
                }, key['k']),
                "time": time.time()
            }), 401

        logging.debug(f"New user received {deserialized_data['name']}")

        key_for_user = self.__JWT.generate_new_symmetric_key()
        user = {
            "name": deserialized_data['name'],
            "key": json.loads(key_for_user),
            "created_time": time.time()
        }

        user = self.__storage.insert(user, self.__storage.db_clients)
        self.__sync.update()
        return jsonify({
            "type": "response",
            "payload": self.__JWT.generate_token(user, key['k']),
            "time": time.time()
        }), 200

    def delete_user(self, user):
        payload = user
        key = json.loads(self.__JWT.get_global_key)

        try:
            deserialized_data = self.__JWT.verify_token(payload, key)
        except InvalidJWSSignature as signErr:
            logging.error(f"Error in the verification {signErr}")
            return jsonify({
                "type": "error",
                "payload": self.__JWT.generate_token({
                    "type": "error",
                    "msg": "Error en la validación de la firma digital",
                    "system_msg": signErr.args
                }, key['k']),
                "time": time.time()
            }), 401
        except Exception as err:
            logging.error(err)
            return jsonify({
                "type": "error",
                "payload": self.__JWT.generate_token({
                    "type": "Unrecognized error",
                    "msg": err.args
                }, key['k']),
                "time": time.time()
            }), 401

        id = deserialized_data['id']

        self.__storage.delete(id, self.__storage.db_clients)
        self.__sync.update()
        return jsonify({
            "type": "response",
            "payload": self.__JWT.generate_token({
                "time": time.time(),
                "deleted": id
            }, key['k']),
            "time": time.time()
        }), 200

    def get_all_users(self):
        return jsonify(
            self.__storage.get_all_documents(self.__storage.db_clients))

    def get_user(self, user):
        payload = user
        key = json.loads(self.__JWT.get_global_key)

        try:
            deserialized_data = self.__JWT.verify_token(payload, key)
        except InvalidJWSSignature as signErr:
            logging.error(f"Error in the verification {signErr}")
            return jsonify({
                "type": "error",
                "payload": self.__JWT.generate_token({
                    "type": "error",
                    "msg": "Error en la validación de la firma digital",
                    "system_msg": signErr.args
                }, key['k']),
                "time": time.time()
            }), 401
        except Exception as err:
            logging.error(err)
            return jsonify({
                "type": "error",
                "payload": self.__JWT.generate_token({
                    "type": "Unrecognized error",
                    "msg": err.args
                }, key['k']),
                "time": time.time()
            }), 401

        try:
            return jsonify({
                "type": "response",
                "payload": self.__JWT.generate_token(
                    self.__storage.search(deserialized_data['id'],
                                          self.__storage.db_clients),
                    key['k']),
                "time": time.time()
            })
        except IndexError as err:
            logging.error(err)
            return jsonify({
                "type": "error",
                "payload": self.__JWT.generate_token({
                    "type": "Error",
                    "msg": "Id not found"
                }, key['k']),
                "time": time.time()
            })
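
All three endpoints above share one envelope convention: the JSON body is {"type", "payload", "time"}, where payload is a token signed with the shared symmetric key, and signature failures come back as 401 carrying an error token. A client-side sketch of the add_user round trip; the endpoint path and the sign/verify helpers are assumptions standing in for JWTController.generate_token/verify_token:

import requests  # assumed available

def call_add_user(base_url, sign, verify, key):
    # sign/verify stand in for JWTController.generate_token/verify_token
    body = {'payload': sign({'name': 'alice'}, key['k'])}
    resp = requests.post(base_url + '/clients', json=body)  # path is assumed
    envelope = resp.json()
    if resp.status_code != 200 or envelope['type'] != 'response':
        raise RuntimeError('server rejected request: %r' % envelope)
    return verify(envelope['payload'], key)  # the stored user record
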
Example #45
class ChainManager(StoppableLoopThread):

    """
    Manages the chain and requests to it.
    """

    def __init__(self):
        super(ChainManager, self).__init__()
        # initialized after configure
        self.miner = None
        self.blockchain = None
        self.synchronizer = Synchronizer(self)
        self.genesis = blocks.CachedBlock.create_cached(blocks.genesis())

    def configure(self, config, genesis=None):
        self.config = config
        logger.info('Opening chain @ %s', utils.get_db_path())
        db = self.blockchain = DB(utils.get_db_path())
        self.index = Index(db)
        if genesis:
            self._initialize_blockchain(genesis)
        logger.debug('Chain @ #%d %s', self.head.number, self.head.hex_hash())
        self.new_miner()

    @property
    def head(self):
        if 'HEAD' not in self.blockchain:
            self._initialize_blockchain()
        ptr = self.blockchain.get('HEAD')
        return blocks.get_block(ptr)

    def _update_head(self, block):
        if not block.is_genesis():
            assert self.head.chain_difficulty() < block.chain_difficulty()
            if block.get_parent() != self.head:
                logger.debug('New Head %r is on a different branch. Old was:%r', block, self.head)
        self.blockchain.put('HEAD', block.hash)
        self.index.update_blocknumbers(self.head)
        self.new_miner()  # reset mining

    def get(self, blockhash):
        assert isinstance(blockhash, str)
        assert len(blockhash) == 32
        return blocks.get_block(blockhash)

    def has_block(self, blockhash):
        assert isinstance(blockhash, str)
        assert len(blockhash) == 32
        return blockhash in self.blockchain

    def __contains__(self, blockhash):
        return self.has_block(blockhash)

    def _store_block(self, block):
        self.blockchain.put(block.hash, block.serialize())

    def commit(self):
        self.blockchain.commit()

    def _initialize_blockchain(self, genesis=None):
        logger.info('Initializing new chain @ %s', utils.get_db_path())
        if not genesis:
            genesis = blocks.genesis()
            self.index.add_block(genesis)
        self._store_block(genesis)
        self._update_head(genesis)
        assert genesis.hash in self

    def loop_body(self):
        ts = time.time()
        pct_cpu = self.config.getint('misc', 'mining')
        if pct_cpu > 0:
            self.mine()
            delay = (time.time() - ts) * (100. / pct_cpu - 1)
            time.sleep(min(max(delay, 0.), 1.))  # clamp: timing jitter can make delay negative
        else:
            time.sleep(.01)

    def new_miner(self):
        "initialize a new miner whenever HEAD is updated"
        # prepare uncles
        uncles = set(self.get_uncles(self.head))
        # logger.debug('%d uncles for next block %r', len(uncles), uncles)
        ineligible = set()  # hashes
        blk = self.head
        for i in range(8):
            for u in blk.uncles:  # assuming uncle headers
                u = utils.sha3(rlp.encode(u))
                if u in self:
                    # logger.debug('ineligible uncle %r', u.encode('hex'))
                    uncles.discard(self.get(u))
            if blk.has_parent():
                blk = blk.get_parent()
        # logger.debug('%d uncles after filtering %r', len(uncles), uncles)

        miner = Miner(self.head, uncles, self.config.get('wallet', 'coinbase'))
        if self.miner:
            for tx in self.miner.get_transactions():
                miner.add_transaction(tx)
        self.miner = miner

    def mine(self):
        with self.lock:
            block = self.miner.mine()
            if block:
                # create new block
                if self.add_block(block):
                    logger.debug("broadcasting new %r" % block)
                    signals.broadcast_new_block.send(sender=None, block=block)
                else:
                    self.new_miner()

    def receive_chain(self, transient_blocks, peer=None):
        with self.lock:
            old_head = self.head
            # blocks are assumed to arrive in chain order, oldest first
            transient_blocks.sort(key=attrgetter('number'))
            assert transient_blocks[0].number <= transient_blocks[-1].number

            # notify syncer
            self.synchronizer.received_blocks(peer, transient_blocks)

            for t_block in transient_blocks: # oldest to newest
                logger.debug('Deserializing %r', t_block)
                #logger.debug(t_block.rlpdata.encode('hex'))
                try:
                    block = blocks.Block.deserialize(t_block.rlpdata)
                except processblock.InvalidTransaction as e:
                    # FIXME there might be another exception in
                    # blocks.deserializeChild when replaying transactions
                    # if this fails, we need to rewind state
                    logger.debug('%r w/ invalid Transaction %r', t_block, e)
                    # stop current syncing of this chain and skip the child blocks
                    self.synchronizer.stop_synchronization(peer)
                    return
                except blocks.UnknownParentException:
                    if t_block.prevhash == blocks.GENESIS_PREVHASH:
                        logger.debug('Rec Incompatible Genesis %r', t_block)
                        if peer:
                            peer.send_Disconnect(reason='Wrong genesis block')
                    else: # should be a single newly mined block
                        assert t_block.prevhash not in self
                        assert t_block.prevhash != blocks.genesis().hash
                        logger.debug('%s with unknown parent %s, peer:%r', t_block, t_block.prevhash.encode('hex'), peer)
                        if len(transient_blocks) != 1:
                            # strange situation here.
                            # we receive more than 1 block, so it's not a single newly mined one
                            # sync/network/... failed to add the needed parent at some point
                            # well, this happens whenever we can't validate a block!
                            # we should disconnect!
                            logger.warn('%s received, but unknown parent.', len(transient_blocks))
                        if peer:
                            # request chain for newest known hash
                            self.synchronizer.synchronize_unknown_block(peer, transient_blocks[-1].hash)
                    break
                if block.hash in self:
                    logger.debug('Known %r', block)
                else:
                    assert block.has_parent()
                    success = self.add_block(block)
                    if success:
                        logger.debug('Added %r', block)

    def add_block(self, block):
        "returns True if block was added successfully"
        # make sure we know the parent
        if not block.has_parent() and not block.is_genesis():
            logger.debug('Missing parent for block %r', block)
            return False

        if not block.validate_uncles():
            logger.debug('Invalid uncles %r', block)
            return False

        # check PoW and forward asap in order to avoid stale blocks
        if not len(block.nonce) == 32:
            logger.debug('Nonce not set %r', block)
            return False
        elif not block.check_proof_of_work(block.nonce) and\
                not block.is_genesis():
            logger.debug('Invalid nonce %r', block)
            return False

        # FIXME: Forward blocks w/ valid PoW asap
        if block.has_parent():
            try:
                #logger.debug('verifying: %s', block)
                #logger.debug('GETTING ACCOUNT FOR COINBASE:')
                #acct = block.get_acct(block.coinbase)
                #logger.debug('GOT ACCOUNT FOR COINBASE: %r', acct)
                processblock.verify(block, block.get_parent())
            except processblock.VerificationFailed as e:
                logger.debug('%r', e)
                return False

        if block.number < self.head.number:
            logger.debug("%r is older than head %r", block, self.head)
            # Q: Should we have any limitations on adding blocks?

        self.index.add_block(block)
        self._store_block(block)

        # set to head if this makes the longest chain w/ most work for that number
        #logger.debug('Head: %r @%s  New:%r @%d', self.head, self.head.chain_difficulty(), block, block.chain_difficulty())
        if block.chain_difficulty() > self.head.chain_difficulty():
            logger.debug('New Head %r', block)
            self._update_head(block)
        elif block.number > self.head.number:
            logger.warn('%r has higher blk number than head %r but lower chain_difficulty of %d vs %d',
                        block, self.head, block.chain_difficulty(), self.head.chain_difficulty())
        self.commit() # batch commits all changes that came with the new block

        return True

    def get_children(self, block):
        return [self.get(c) for c in self.index.get_children(block.hash)]

    def get_uncles(self, block):
        if not block.has_parent():
            return []
        parent = block.get_parent()
        o = []
        i = 0
        while parent.has_parent() and i < 6:
            grandparent = parent.get_parent()
            o.extend([u for u in self.get_children(grandparent) if u != parent])
            parent = grandparent
            i += 1
        return o

    def add_transaction(self, transaction):
        logger.debug("add transaction %r" % transaction)
        with self.lock:
            res = self.miner.add_transaction(transaction)
            if res:
                logger.debug("broadcasting valid %r" % transaction)
                signals.send_local_transactions.send(
                    sender=None, transactions=[transaction])
            return res

    def get_transactions(self):
        logger.debug("get_transactions called")
        return self.miner.get_transactions()

    def get_chain(self, start='', count=NUM_BLOCKS_PER_REQUEST):
        "return 'count' blocks starting from head or start"
        logger.debug("get_chain: start:%s count:%d", start.encode('hex'), count)
        blocks = []
        block = self.head
        if start:
            if start in self.index.db:
                return []
            block = self.get(start)
            if not self.in_main_branch(block):
                return []
        for i in range(count):
            blocks.append(block)
            if block.is_genesis():
                break
            block = block.get_parent()
        return blocks

    def in_main_branch(self, block):
        try:
            return block.hash == self.index.get_block_by_number(block.number)
        except KeyError:
            return False

    def get_descendants(self, block, count=1):
        logger.debug("get_descendants: %r ", block)
        assert block.hash in self
        block_numbers = range(block.number+1, min(self.head.number, block.number+count))
        return [self.get(self.index.get_block_by_number(n)) for n in block_numbers]
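
receive_chain in each ChainManager variant depends on handling parents before children, which is exactly what the sort by block number guarantees. The ordering step in isolation, with TBlock standing in for the transient block type:

import collections
from operator import attrgetter

TBlock = collections.namedtuple('TBlock', 'number prevhash')

batch = [TBlock(3, 'h2'), TBlock(1, 'h0'), TBlock(2, 'h1')]
batch.sort(key=attrgetter('number'))             # oldest block first
assert [b.number for b in batch] == [1, 2, 3]
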
Example #46
File: app.py  Project: rockem/MediaSync
def main(source, target, filter):
    synchronizer = Synchronizer(source, target)
    synchronizer.filters = [TagFilter(f) for f in filter]
    synchronizer.sync()
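
The TagFilter used here is not shown in the snippet. A plausible minimal version, offered purely as an assumption about its interface, is a predicate over an item's tags:

class TagFilter(object):
    """Hypothetical filter: keep media items carrying the given tag."""

    def __init__(self, tag):
        self.tag = tag

    def matches(self, item):
        # item is assumed to expose an iterable 'tags' attribute
        return self.tag in getattr(item, 'tags', ())
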
Example #47
 def start_content_synchronizer(self): 
     self._synchronizer = Synchronizer(self._config_manager.local_address,
                                       self._config_manager.local_port,
                                       self._config_manager.type,
                                       self._config_manager.content_db)
     self._synchronizer.start()
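
A component that starts the synchronizer like this usually wants a matching teardown hook. A hedged counterpart, assuming the Synchronizer exposes a stop() method (not shown in the snippet):

 def stop_content_synchronizer(self):
     if self._synchronizer is not None:
         self._synchronizer.stop()
         self._synchronizer = None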