Example #1
    def on_template(cls, is_new_block):
        '''Called when TemplateRegistry registers a new
           block which we have to broadcast to clients.'''
        start = Interfaces.timestamper.time()
        clean_jobs = is_new_block
        bc_args = Interfaces.template_registry.get_last_broadcast_args()
        (job_id, prevhash, coinb1, coinb2, merkle_branch, version, nbits,
         ntime, _, rsk_flag) = bc_args
        # Push new job to subscribed clients
        cls.emit(job_id, prevhash, coinb1, coinb2, merkle_branch, version,
                 nbits, ntime, clean_jobs)

        cnt = Pubsub.get_subscription_count(cls.event)
        log.info("BROADCASTED to %d connections in %.03f sec" %
                 (cnt, (Interfaces.timestamper.time() - start)))
        tag = "[RSK_BLOCK_RECEIVED_END]" if rsk_flag else "[BTC_BLOCK_RECEIVED_END]"
        log.info(
            json.dumps({
                "rsk": "[RSKLOG]",
                "tag": tag,
                "uuid": util.id_generator(),
                "start": start,
                "elapsed": Interfaces.timestamper.time() - start,
                "data": bc_args,
                "clients": cnt
            }))
        log.info(
            json.dumps({
                "rsk": "[RSKLOG]",
                "tag": "[WORK_SENT]",
                "uuid": util.id_generator(),
                "start": start,
                "elapsed": Interfaces.timestamper.time() - start
            }))
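Note: every [RSKLOG] entry in these examples carries a "uuid" from util.id_generator() so related log lines can be correlated. The project's helper is not shown on this page; a minimal sketch of such a generator (an assumed implementation, not the pool's actual code) could be:

import random
import string


def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    # Build a short random token, e.g. 'G4X9QZ', to correlate log entries.
    return ''.join(random.choice(chars) for _ in range(size))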
Example #2
    def _finish_after_subscribe(self, result):
        '''Send a new job to a newly subscribed client.'''
        start = Interfaces.timestamper.time()
        try:
            bc_args = Interfaces.template_registry.get_last_broadcast_args()
            (job_id, prevhash, coinb1, coinb2, merkle_branch, version, nbits,
             ntime, _, _) = bc_args
        except Exception as e:
            log.info("EXCEPTION: %s - %s", e, result)
            log.error("Template not ready yet")
            return result

        # Force a higher difficulty in development mode
        if settings.RSK_DEV_MODE and hasattr(settings, 'RSK_STRATUM_DIFFICULTY'):
            self.connection_ref().rpc(
                'mining.set_difficulty',
                [settings.RSK_STRATUM_DIFFICULTY],
                is_notification=True)

        # Force the client to drop any previous jobs (e.g. from a previous connection)
        clean_jobs = True
        self.emit_single(job_id, prevhash, coinb1, coinb2, merkle_branch,
                         version, nbits, ntime, clean_jobs)

        log.info(
            json.dumps({
                "uuid": util.id_generator(),
                "rsk": "[RSKLOG]",
                "tag": "[WORK_SENT_OLD]",
                "start": start,
                "elapsed": Interfaces.timestamper.time() - start,
                "data": bc_args
            }))

        return result
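For reference, the mining.set_difficulty call above goes out as a JSON-RPC notification on the Stratum connection. A sketch of the resulting wire format (field layout per the Stratum mining protocol; the difficulty value is illustrative):

import json

# Illustrative only: what rpc('mining.set_difficulty', [...],
# is_notification=True) puts on the wire.
notification = {
    "id": None,                        # notifications carry no request id
    "method": "mining.set_difficulty",
    "params": [32],                    # e.g. settings.RSK_STRATUM_DIFFICULTY
}
print(json.dumps(notification))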
Example #3
    async def fetch_ohlcv_data(exchange, symbol: str, resolution: int,
                               since: str, until: str, limit: int,
                               proxies: List[str]):
        """ Fetches the OHLCV candles in batches

        :param exchange: the exchange to fetch the data for
        :param symbol: the symbol to fetch the data for
        :param resolution: the resolution of the OHLCV data
        :param since: start date
        :param until: end date
        :param limit: how many data points to fetch in one call
        :param proxies: a list of proxies to rotate through
        :return: None; the OHLCV time series is written through the persistence adapter
        """

        # convert datetime strings to timestamp in milliseconds
        since = exchange.parse8601(since)
        until = exchange.parse8601(until)

        # filename
        file_name = f"{str(exchange.id).upper()}_{str(symbol).replace('/', '')}_{resolution}.csv"

        # loop until everything has been fetched, or forever if until == 0
        while True:

            # if one or more proxies are configured, pick a random one for this request
            if proxies:
                # if the proxy accepts a session id, set a new one per request
                exchange.aiohttp_proxy = str(random.choice(proxies)).replace(
                    '{rand}', util.id_generator())
                exchange.headers = {'Connection': 'close'}

            # exit the loop once the whole time series has been fetched
            if until != 0 and since > until:
                break

            try:
                logging.info(
                    'Start fetching %s data points from %s for %s and timestamp %s (%s)',
                    limit, exchange.id, symbol, since,
                    datetime.utcfromtimestamp(
                        util.ms_timestamp_to_epoch_timestamp(since)))

                ohlcv_ts = pd.DataFrame(
                    data=await exchange.fetch_ohlcv(symbol, resolution, since,
                                                    limit),
                    columns=util.ohlcv_columns(),
                )
                logging.info(f'Received {len(ohlcv_ts.index)} data points')
            except ValueError as error:
                logging.fatal(error)
                sys.exit(1)

            except (ccxt.ExchangeError, ccxt.AuthenticationError,
                    ccxt.ExchangeNotAvailable, ccxt.RequestTimeout,
                    ccxt.DDoSProtection) as error:
                logging.error(
                    f'Got an error {type(error).__name__} {error.args}. Will try to send the same Request again.',
                )
                # skip current iteration and try again if we run into an exception
                continue

            # write data frame to csv
            io.write_csv(f'data/{file_name}', ohlcv_ts, index=False, mode='a')

            # stop once the exchange returns fewer than two candles: there is
            # nothing left to fetch, and the resolution detection below needs
            # at least two rows
            if len(ohlcv_ts.index) < 2:
                break

            # calculate the time between the last two candles (the resolution of
            # one candle in ms) and derive the timestamp of the next candle for
            # the next request
            resolution_ms = util.detect_resolution(
                datetime.utcfromtimestamp(
                    util.ms_timestamp_to_epoch_timestamp(
                        int(ohlcv_ts['datetime'].iloc[-1]))),
                datetime.utcfromtimestamp(
                    util.ms_timestamp_to_epoch_timestamp(
                        int(ohlcv_ts['datetime'].iloc[-2]))))

            # update since
            since = int(ohlcv_ts['datetime'].iloc[-1]) + resolution_ms
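The helpers util.ms_timestamp_to_epoch_timestamp and util.detect_resolution are not shown in this example; plausible minimal implementations (assumptions, not the project's code) would be:

from datetime import datetime


def ms_timestamp_to_epoch_timestamp(ts_ms: int) -> float:
    # ccxt reports candle timestamps in milliseconds, while
    # datetime.utcfromtimestamp expects seconds.
    return ts_ms / 1000


def detect_resolution(last: datetime, previous: datetime) -> int:
    # Spacing between two consecutive candles, in milliseconds.
    return int(abs((last - previous).total_seconds()) * 1000)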
Example #4
    def submit(self, worker_name, job_id, extranonce2, ntime, nonce):
        '''Try to solve block candidate using given parameters.'''

        session = self.connection_ref().get_session()
        session.setdefault('authorized', {})

        # Check if worker is authorized to submit shares
        if not Interfaces.worker_manager.authorize(
                worker_name, session['authorized'].get(worker_name)):
            raise SubmitException("Worker is not authorized")

        start = Interfaces.timestamper.time()

        # Check if extranonce1 is in connection session
        extranonce1_bin = session.get('extranonce1', None)
        if not extranonce1_bin:
            raise SubmitException("Connection is not subscribed for mining")

        if settings.RSK_DEV_MODE and hasattr(settings,
                                             'RSK_STRATUM_DIFFICULTY'):
            difficulty = settings.RSK_STRATUM_DIFFICULTY
        else:
            difficulty = session['difficulty']
        submit_time = Interfaces.timestamper.time()

        Interfaces.share_limiter.submit(self.connection_ref, difficulty,
                                        submit_time)

        # Check that the submitted share meets all requirements
        # and is a valid proof of work.
        try:
            (block_header, block_hash, on_submit,
             on_submit_rsk) = Interfaces.template_registry.submit_share(
                 job_id, worker_name, extranonce1_bin, extranonce2, ntime,
                 nonce, difficulty)
        except SubmitException as e:
            log.error("SUBMIT EXCEPTION: %s", e)
            # block_header and block_hash are None when the submitted data is corrupted
            Interfaces.share_manager.on_submit_share(worker_name, None, None,
                                                     difficulty, submit_time,
                                                     False)
            raise

        Interfaces.share_manager.on_submit_share(worker_name, block_header,
                                                 block_hash, difficulty,
                                                 submit_time, True)

        if on_submit is not None:
            # The pool performs submitblock() on bitcoind. Hook into the
            # result and report it to the share manager.
            on_submit.addCallback(Interfaces.share_manager.on_submit_block,
                                  worker_name, block_header, block_hash,
                                  submit_time)

        if on_submit_rsk is not None:
            # The pool performs submitBitcoinBlockPartialMerkle() on rskd.
            # Hook into the result and report it to the share manager.
            on_submit_rsk.addCallback(
                Interfaces.share_manager.on_submit_block_rsk, worker_name,
                block_header, block_hash, submit_time)

        if on_submit or on_submit_rsk:
            log.info(
                json.dumps({
                    "uuid": util.id_generator(),
                    "rsk": "[RSKLOG]",
                    "tag": "[SUBMITBLOCK_END]",
                    "start": submit_time,
                    "elapsed": Interfaces.timestamper.time() - submit_time,
                    "data": (block_hash, job_id)
                }))

        return True
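on_submit and on_submit_rsk above are Twisted Deferreds, and addCallback binds extra positional arguments that are passed after the Deferred's result. A standalone sketch of that pattern (with a hypothetical callback in place of the share manager):

from twisted.internet import defer


def on_submit_block(result, worker_name, block_header, block_hash, submit_time):
    # 'result' is whatever the Deferred fires with (e.g. the outcome of
    # bitcoind's submitblock); the remaining arguments were bound below.
    print(worker_name, result)


d = defer.Deferred()
d.addCallback(on_submit_block, 'worker1', b'header', b'hash', 0.0)
d.callback(True)  # simulate the submitblock call completing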