Example #1
 async def run(self):
     for chunk in chunks(self.generate_links(), self.config.chunk_size):
         tasks = await self.create_task_group(chunk)
         await self.process_task_group(tasks)
         self.cleanup_task_group()
     await self.close_sessions()
     await self.conn.close()
Example #2
    def _compact_hashX(self, hashX, hist_map, hist_list, write_items,
                       keys_to_delete):
        '''Compress history for a hashX.  hist_list is an ordered list of
        the histories to be compressed.'''
        # History entries (tx numbers) are 4 bytes each.  Distribute
        # over rows of up to 50KB in size.  A fixed row size means
        # future compactions will not need to update the first N - 1
        # rows.
        max_row_size = self.max_hist_row_entries * 4
        full_hist = b''.join(hist_list)
        nrows = (len(full_hist) + max_row_size - 1) // max_row_size
        if nrows > 4:
            self.log_info(
                'hashX {} is large: {:,d} entries across {:,d} rows'.format(
                    hash_to_str(hashX),
                    len(full_hist) // 4, nrows))

        # Find what history needs to be written, and what keys need to
        # be deleted.  Start by assuming all keys are to be deleted,
        # and then remove those that are the same on-disk as when
        # compacted.
        write_size = 0
        keys_to_delete.update(hist_map)
        for n, chunk in enumerate(util.chunks(full_hist, max_row_size)):
            key = hashX + pack('>H', n)
            if hist_map.get(key) == chunk:
                keys_to_delete.remove(key)
            else:
                write_items.append((key, chunk))
                write_size += len(chunk)

        assert n + 1 == nrows
        self.comp_flush_count = max(self.comp_flush_count, n)

        return write_size
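The row count above is a plain ceiling division: each entry is 4 bytes, a row holds at most max_row_size bytes, and a partial final row still counts as a row. A minimal sketch of that calculation, assuming a hypothetical max_hist_row_entries of 12,500 (so a full row is about 50KB):

def row_count(num_entries, max_hist_row_entries=12_500):
    # Each history entry (a tx number) is 4 bytes.
    max_row_size = max_hist_row_entries * 4   # 50,000 bytes per row
    hist_bytes = num_entries * 4
    # Ceiling division: a partial final row still needs its own row.
    return (hist_bytes + max_row_size - 1) // max_row_size

# 30,000 entries -> 120,000 bytes -> 3 rows.  Because the row size is fixed,
# only the last row can be partial, so earlier rows do not need rewriting
# when a later compaction only appends history.
assert row_count(30_000) == 3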
Example #3
    async def reorg_chain(self, count=None):
        '''Handle a chain reorganisation.

        Count is the number of blocks to simulate a reorg, or None for
        a real reorg.'''
        if count is None:
            self.logger.info('chain reorg detected')
        else:
            self.logger.info('faking a reorg of {:,d} blocks'.format(count))
        await self.controller.run_in_executor(self.flush, True)

        hashes, start, count = await self.reorg_hashes(count)
        # Reverse and convert to hex strings.
        hashes = [hash_to_str(hash) for hash in reversed(hashes)]
        # Get saved eventlog hashYs.
        if hashes:
            eventlog_hashYs = reduce(
                operator.add, [self.get_block_hashYs(x) for x in hashes])
        else:
            eventlog_hashYs = []
        self.logger.info('chain reorg eventlog_hashYs {} {}'.format(
            eventlog_hashYs, hashes))

        for hex_hashes in chunks(hashes, 50):
            blocks = await self.daemon.raw_blocks(hex_hashes)
            await self.controller.run_in_executor(self.backup_blocks, blocks,
                                                  eventlog_hashYs)
        await self.prefetcher.reset_height()
Example #4
File: api.py  Project: cctvai/spark-ar-tv
 def fetch_videos(self, ids, batch_size=20):
     result = {}
     batches = util.chunks(ids, batch_size)
     for batch in batches:
         response = self.fetch(','.join(batch))
         data = self.save_videos(response)
         result.update(data)
     return result
Example #5
 def _upload_bfmr(self, numbers) -> None:
     former_headless = self.driver_creator.args.headless
     self.driver_creator.args.headless = False
     driver = self._login_bfmr()
     try:
         for batch in util.chunks(numbers, 100):
             self._upload_bfmr_batch(driver, batch)
     finally:
         driver.quit()
     self.driver_creator.args.headless = former_headless
Example #6
File: api.py  Project: cctvai/spark-ar-tv
 def fetch_channels(self, ids):
     batches = util.chunks(ids, 30)
     results = []
     for batch in batches:
         request = self.get_youtube().channels().list(
             part="snippet,contentDetails",
             id=','.join(batch),
         )
         response = request.execute()
         results += [Channel(item) for item in response.get('items')]
     return results
Example #7
def pool_gen_global_site_channels(pool, master):
    config = master.config
    site = master.global_site

    pages = []
    out_dir = "%s/global" % (config.out_dir)
    results = []
    if site.gen_channel_html and config.channel:
        util.mkdir("%s/channels" % out_dir)
        chunks = util.chunks(site.groups, 10)
        for chunk in chunks:
            results.append(
                pool.apply_async(single_channel_pages, (site, config, chunk)))
    return results
Example #8
    async def reorg_chain(self, count=None):
        '''Handle a chain reorganisation.

        Count is the number of blocks to simulate a reorg, or None for
        a real reorg.'''
        if count is None:
            self.logger.info('chain reorg detected')
        else:
            self.logger.info('faking a reorg of {:,d} blocks'.format(count))
        await self.controller.run_in_executor(self.flush, True)

        hashes = await self.reorg_hashes(count)
        # Reverse and convert to hex strings.
        hashes = [hash_to_str(hash) for hash in reversed(hashes)]
        for hex_hashes in chunks(hashes, 50):
            blocks = await self.daemon.raw_blocks(hex_hashes)
            await self.controller.run_in_executor(self.backup_blocks, blocks)
        await self.prefetcher.reset_height()
Example #9
def test_chunks():
    assert list(util.chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
Example #10
 def _upload_bfmr(self, numbers) -> None:
     for batch in util.chunks(numbers, 100):
         self._upload_bfmr_batch(batch)
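Every example above delegates the batching itself to a small chunks helper, and the test in Example #9 pins down its behaviour: it yields consecutive slices of at most the given size, with a shorter final slice. A minimal sketch consistent with that test, assuming the input supports len() and slicing (the projects shown here may implement util.chunks differently):

def chunks(items, size):
    '''Yield successive slices of at most `size` items from `items`.'''
    for i in range(0, len(items), size):
        yield items[i:i + size]

assert list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]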