Example 1
    def _do_sync_sessions(self):
        start = posix_time()  #@UndefinedVariable

        # There are some connections
        if len(self._to_sync_connections) > 0:
            conns = self._to_sync_connections.pop()

            for connection in conns:
                session = connection.get_session()
                # Report all events
                self.new_subscription(session)
                self.difficulty_changed(session)
                for worker_stats in session['SM_worker_stats'].values():
                    self.new_authorization(session, worker_stats)
                    self.worker_stats_changed(worker_stats)

        if len(self._to_sync_connections) > 0:
            self._plan_sync_process()
        else:
            # Flush all messages into delivery queue and send SYNC message
            self._flush_and_send_sync()
            if self._dropped_from_last_sync:
                log.error("Dropped objects from sync. start")

        log.info("Sync. batch took %.3fs" %
                 (posix_time() - start, ))  #@UndefinedVariable
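Every snippet in this listing times its work with posix_time(); the #@UndefinedVariable markers are PyDev annotations for a name resolved elsewhere. A minimal sketch of the helper, assuming it simply aliases time.time() (the real project may define it differently):

    # Assumption: posix_time() is just the standard POSIX timestamp in seconds
    from time import time as posix_time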
Example 2
    def _sync_sessions(self):
        start = posix_time()

        self._dropped_from_last_sync = False

        all_conns = collections.deque()
        curr_list = []
        count = 0
        for connection_ref in ConnectionRegistry.iterate():
            try:
                curr_list.append(connection_ref())
                count += 1
            except:
                pass
            if count >= config.REPORTER_SYNC_SESSIONS_BATCH_SIZE:  #@UndefinedVariable
                all_conns.append(curr_list)
                curr_list = []
                count = 0  # start counting the next batch

        if len(curr_list) > 0:
            all_conns.append(curr_list)
        self._to_sync_connections = all_conns

        self._plan_sync_process()

        log.info("Sync. start took %.3fs" %
                 (posix_time() - start, ))  #@UndefinedVariable
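_plan_sync_process is not part of this listing. A plausible sketch, assuming it merely schedules the next _do_sync_sessions batch on the Twisted reactor so that a large connection registry never blocks it for long:

    def _plan_sync_process(self):
        # Hypothetical: process the next batch on the next reactor iteration
        reactor.callLater(0, self._do_sync_sessions)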
Example 3
    def _sync_sessions(self):
        start = posix_time()

        self._dropped_from_last_sync = False

        all_conns = collections.deque()
        curr_list = []
        count = 0
        for connection_ref in ConnectionRegistry.iterate():
            try:
                curr_list.append(connection_ref())
                count += 1
            except:
                pass
            if count >= config.REPORTER_SYNC_SESSIONS_BATCH_SIZE: #@UndefinedVariable
                all_conns.append(curr_list)
                curr_list = []
                count = 0  # start counting the next batch

        if len(curr_list) > 0:
            all_conns.append(curr_list)
        self._to_sync_connections = all_conns

        self._plan_sync_process()

        log.info("Sync. start took %.3fs" % (posix_time() - start,)) #@UndefinedVariable
Example 4
    def _do_sync_sessions(self):
        start = posix_time() #@UndefinedVariable

        # There are some connections
        if len(self._to_sync_connections) > 0:
            conns = self._to_sync_connections.pop()

            for connection in conns:
                session = connection.get_session()
                # Report all events
                self.new_subscription(session)
                self.difficulty_changed(session)
                for worker_stats in session['SM_worker_stats'].values():
                    self.new_authorization(session, worker_stats)
                    self.worker_stats_changed(worker_stats)

        if len(self._to_sync_connections) > 0:
            self._plan_sync_process()
        else:
            # Flush all messages into delivery queue and send SYNC message
            self._flush_and_send_sync()
            if self._dropped_from_last_sync:
                log.error("Dropped objects from sync. start")

        log.info("Sync. batch took %.3fs" % (posix_time() - start,)) #@UndefinedVariable
Example 5
    def subscribe(self, *args):
        connection = self.connection_ref()
        session = connection.get_session()

        if session.get('extranonce1'):
            # Already subscribed

            subs1 = Pubsub.get_subscription(connection, DifficultySubscription.event)
            subs2 = Pubsub.get_subscription(connection, MiningSubscription.event)

            extranonce1_hex = binascii.hexlify(session['extranonce1'])
            extranonce2_size = Interfaces.template_registry.extranonce2_size

            log.warning('Already subscribed')
            return (((subs1.event, subs1.get_key()), (subs2.event, subs2.get_key())),) + \
                   (extranonce1_hex, extranonce2_size)

        extranonce1 = Interfaces.template_registry.get_new_extranonce1()
        extranonce2_size = Interfaces.template_registry.extranonce2_size
        extranonce1_hex = binascii.hexlify(extranonce1)

        session['extranonce1'] = extranonce1
        session['subscribed_at'] = posix_time()
        # Don't accept job_id if job_id < min_job_id
        session['min_job_id'] = 0
        session['client_sw'] = self._get_client_sw(*args)

        subs1 = Pubsub.subscribe(connection, DifficultySubscription())[0]
        subs2 = Pubsub.subscribe(connection, MiningSubscription())[0]

        Interfaces.reporter.new_subscription(session)

        return ((subs1, subs2),) + (extranonce1_hex, extranonce2_size)
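The tuple returned above mirrors a stratum mining.subscribe response: two (event, subscription key) pairs followed by the session's extranonce1 in hex and the extranonce2 size. With illustrative values only:

    # ((('mining.set_difficulty', 'deadbeef01'),
    #   ('mining.notify', 'deadbeef02')),
    #  '08000002', 4)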
Example 6
    def update_blank_block(self, prevhash):
        '''Picks the current block, replaces its prevhash and broadcasts
        it as a new template to clients. This is a work-around for slow
        processing of blocks in bitcoind.'''

        start = posix_time()

        template = self.block_template_class(Interfaces.timestamper, self.coinbaser, JobIdGenerator.get_new_id())
        template.fill_from_another(self._last_template, prevhash)
        self._add_template(template)

        log.info("Blank block update finished, %.03f sec, %d txes" % \
                    (posix_time() - start, len(template.vtx)))

        # Now let's do standard update (with transactions)
        self.update_block()
Example 7
    def _report_worker_stats(self):
        try:
            # Take the oldest delayed session queue and its noticed set
            expired_queue = self._ws_delayed_queues.popleft()
            expired_noticed = self._ws_delayed_noticed.popleft()

            # Take a timestamp for the reporting
            timestamp = posix_time()

            # Remove the just-reported updates from the noticed set
            self._ws_noticed_all.difference_update(expired_noticed)
            # Make events from the expired queue's worker stats
            get_event = self._work_update_event
            events = [get_event(ws, timestamp) for ws in expired_queue]

            # Add the youngest facts into the delay queues
            self._ws_delayed_queues.append(self._ws_current_queue)
            self._ws_delayed_noticed.append(self._ws_current_noticed)
            self._ws_current_noticed = set()
            self._ws_current_queue = []

            if len(events) > 0:
                # Send the report message with all events as a payload
                msg = ['WSCH', int(timestamp * 1000),
                       events]  #@UndefinedVariable
                self._queue.deliver(msg)

        finally:
            reactor.callLater(
                float(
                    config.REPORTER_WORKER_STATS_DELAY)  #@UndefinedVariable
                / config.REPORTER_WORKER_STATS_PARTS,  #@UndefinedVariable
                self._report_worker_stats)
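The two delayed deques rotate one bucket per call, so a worker-stats change is delivered only after REPORTER_WORKER_STATS_DELAY seconds, split into REPORTER_WORKER_STATS_PARTS buckets. A sketch of how the structures could be pre-filled (the initializer itself is an assumption; the attribute names follow the snippet above):

    def _init_worker_stats_queues(self):
        parts = config.REPORTER_WORKER_STATS_PARTS  #@UndefinedVariable
        # One pending-events bucket and one noticed set per delay part
        self._ws_delayed_queues = collections.deque([] for _ in range(parts))
        self._ws_delayed_noticed = collections.deque(set() for _ in range(parts))
        self._ws_current_queue = []
        self._ws_current_noticed = set()
        self._ws_noticed_all = set()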
Example 8
 def _report_simple_event(self, ev_name):
     events = self._simple_events[ev_name]
     if len(events) > 0:
         self._simple_events[ev_name] = []
         self._queue.deliver([ev_name,
                              int(posix_time() * 1000),
                              events])  #@UndefinedVariable
Example 9
    def run(self):
        try:
            template = self.registry.get_last_template()
            if template:
                current_prevhash = "%064x" % template.hashPrevBlock
            else:
                current_prevhash = None

            # Get prevhash
            if config.BITCOIN_VERSION_0_9_PLUS:
                prevhash = yield self.bitcoin_rpc.get_best_block_hash()
            else:
                prevhash = util.reverse_hash((yield self.bitcoin_rpc.prevhash()))

            if prevhash and prevhash != current_prevhash:
                log.info("New block! Prevhash: %s" % prevhash)
                self.registry.update_blank_block(prevhash)

            elif posix_time() - self.registry.last_update >= config.MERKLE_REFRESH_INTERVAL:
                log.info("Merkle update! Prevhash: %s" % prevhash)
                self.registry.update_block()

        except Exception:
            log.exception("UpdateWatchdog.run failed")
        finally:
            self.schedule()
Example 10
    def _update_block(self, bitcoind_rpc_template):
        try:
            if isinstance(bitcoind_rpc_template, dict):
                start = posix_time()

                template = self.block_template_class(Interfaces.timestamper, self.coinbaser, JobIdGenerator.get_new_id())
                template.fill_from_rpc(bitcoind_rpc_template)
                self._add_template(template)

                log.info("Update finished, %.03f sec, %d txes, %s BTC" % \
                         (posix_time() - start, len(template.vtx), Decimal(template.get_value())/10**8)) #@UndefinedVariable
            else:
                log.error("Invalid data for block update: %s" % (bitcoind_rpc_template, ))
        finally:
            self.update_in_progress = False
        return bitcoind_rpc_template
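Decimal(template.get_value()) / 10**8 converts the template's reward from satoshis to BTC without floating-point rounding, for example:

    from decimal import Decimal
    Decimal(2500410000) / 10**8  # Decimal('25.0041') BTC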
Example 11
    def _report_worker_stats(self):
        try:
            # Take the oldest delayed session queue and its noticed set
            expired_queue = self._ws_delayed_queues.popleft()
            expired_noticed = self._ws_delayed_noticed.popleft()

            # Take a timestamp for the reporting
            timestamp = posix_time()

            # Remove the just-reported updates from the noticed set
            self._ws_noticed_all.difference_update(expired_noticed)
            # Make events from the expired queue's worker stats
            get_event = self._work_update_event
            events = [get_event(ws, timestamp) for ws in expired_queue]

            # Add the youngest facts into the delay queues
            self._ws_delayed_queues.append(self._ws_current_queue)
            self._ws_delayed_noticed.append(self._ws_current_noticed)
            self._ws_current_noticed = set()
            self._ws_current_queue = []

            if len(events) > 0:
                # Send the report message with all events as a payload
                msg = ['WSCH', int(timestamp * 1000), events] #@UndefinedVariable
                self._queue.deliver(msg)

        finally:
            reactor.callLater(float(config.REPORTER_WORKER_STATS_DELAY) #@UndefinedVariable
                              / config.REPORTER_WORKER_STATS_PARTS, #@UndefinedVariable
                              self._report_worker_stats)
Example 12
    def update_stats_by_no_submit(self, session):
        now = posix_time()

        for worker_stats in session['SM_worker_stats'].itervalues():
            # Only update the stats when there was already at least one valid share
            # or some other fact could change
            if worker_stats['last_valid_share'] or \
                    worker_stats['invalid_submits'] > 0 or \
                    worker_stats['stale_submits'] > 0:
                self._update_stats_by_submit(worker_stats, 0, now)
Example 13
    def _process_no_submits(self):
        # Plan the next execution
        reactor.callLater(
            self._NO_SHARE_RECALCULATION_PERIOD,  #@UndefinedVariable
            self._process_no_submits)

        # Statistics
        start = posix_time()
        total, changes = 0, 0

        for connection_ref in self._no_submit_conn_refs.iterkeyrefs():
            try:
                connection = connection_ref()
                session = connection.get_session()
                total += 1
            except:
                # Not connected
                continue

            # Skip not initialized sessions
            if not session.get('session_id'):
                continue

            # Decrease hash rate for all authorized workers
            Interfaces.share_manager.update_stats_by_no_submit(session)

            # Shortcut for the slowest miners, recalculation is not necessary
            if session['SL_difficulty'] == 1 and not session[
                    'SL_requested_difficulty']:
                continue

            # Difficulty recalculation should almost always lead to new difficulty here
            if self._recalculate(connection, session):
                changes += 1

        # Log results only when at least one connection has finished its short period
        if changes > 0:
            log.info("No submits processed in %.03fs, %d / %d (chng, total)" % \
                     (posix_time() - start, changes, total))

        # Get new shallow copy of all connections
        self._no_submit_conn_refs = ConnectionRegistry.get_shallow_copy()
Example 14
    def _reset_submission_collection(session):
        now = posix_time()

        # Number of submits collected during the current full collection period
        session['SL_submits'] = 0
        session['SL_first_part_submits'] = 0

        session['SL_start_time'] = now
        session['SL_first_part_end_time'] = now

        session['SL_last_recalculation_time'] = now
        session['SL_submission_rate'] = None
Example 16
    def update_stats_by_no_submit(self, session):
        now = posix_time()

        for worker_stats in session["SM_worker_stats"].itervalues():
            # Only update the stats when there was already at least one valid share
            # or some other fact could change
            if (
                worker_stats["last_valid_share"]
                or worker_stats["invalid_submits"] > 0
                or worker_stats["stale_submits"] > 0
            ):
                self._update_stats_by_submit(worker_stats, 0, now)
Example 17
    def _process_no_submits(self):
        # Plan the next execution
        reactor.callLater(self._NO_SHARE_RECALCULATION_PERIOD, #@UndefinedVariable
                          self._process_no_submits)

        # Statistics
        start = posix_time()
        total, changes = 0, 0

        for connection_ref in self._no_submit_conn_refs.iterkeyrefs():
            try:
                connection = connection_ref()
                session = connection.get_session()
                total += 1
            except:
                # Not connected
                continue

            # Skip not initialized sessions
            if not session.get('session_id'):
                continue

            # Decrease hash rate for all authorized workers
            Interfaces.share_manager.update_stats_by_no_submit(session)

            # Shortcut for the slowest miners, recalculation is not necessary
            if session['SL_difficulty'] == 1 and not session['SL_requested_difficulty']:
                continue

            # Difficulty recalculation should almost always lead to new difficulty here
            if self._recalculate(connection, session):
                changes += 1

        # Log results only when at least one connection has finished its short period
        if changes > 0:
            log.info("No submits processed in %.03fs, %d / %d (chng, total)" % \
                     (posix_time() - start, changes, total))

        # Get new shallow copy of all connections
        self._no_submit_conn_refs = ConnectionRegistry.get_shallow_copy()
Example 18
    def on_template(cls, is_new_block):
        '''This is called when TemplateRegistry registers a
           new block which we have to broadcast to clients.'''

        start = posix_time()

        clean_jobs = is_new_block
        (job_id, prevhash, coinb1, coinb2, merkle_branch, version, nbits, ntime, _) = \
                        Interfaces.template_registry.get_last_template_broadcast_args()

        if not is_new_block:
            try:
                cls.before_broadcast.callback(True)
                cls.before_broadcast = defer.Deferred()
            except:
                log.exception("before_broadcast callback failed!")

        # Push new job to subscribed clients
        cls.emit("%x"%job_id, prevhash, coinb1, coinb2, merkle_branch, version, nbits, ntime, clean_jobs)

        cnt = Pubsub.get_subscription_count(cls.event)
        log.info("BROADCASTED to %d connections in %.03f sec" % (cnt, (posix_time() - start)))
Example 19
    def update_block(self):
        '''Registry calls the getblocktemplate() RPC
        and builds a new block template.'''

        if self.update_in_progress:
            # Block has been already detected
            return

        self.update_in_progress = True
        self.last_update = posix_time()

        d = self.bitcoin_rpc.getblocktemplate()
        d.addCallback(self._update_block)
        d.addErrback(self._update_block_failed)
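_update_block_failed is not shown in this listing; a minimal errback sketch (an assumption), mirroring the cleanup that _update_block performs in its finally block:

    def _update_block_failed(self, failure):
        # Hypothetical: log the failure and allow further updates again
        log.error("Getting block template failed: %s" % failure)
        self.update_in_progress = False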
Example 20
    def on_template(cls, is_new_block):
        '''This is called when TemplateRegistry registers a
           new block which we have to broadcast to clients.'''

        start = posix_time()

        clean_jobs = is_new_block
        (job_id, prevhash, coinb1, coinb2, merkle_branch, version, nbits, ntime, _) = \
                        Interfaces.template_registry.get_last_template_broadcast_args()

        if not is_new_block:
            try:
                cls.before_broadcast.callback(True)
                cls.before_broadcast = defer.Deferred()
            except:
                log.exception("before_broadcast callback failed!")

        # Push new job to subscribed clients
        cls.emit("%x" % job_id, prevhash, coinb1, coinb2, merkle_branch,
                 version, nbits, ntime, clean_jobs)

        cnt = Pubsub.get_subscription_count(cls.event)
        log.info("BROADCASTED to %d connections in %.03f sec" %
                 (cnt, (posix_time() - start)))
Example 21
    def _change_difficulty(self, session, new_difficulty):
        org = session['SL_difficulty']

        if new_difficulty > org:
            session['SL_changes_up'] += 1
        if new_difficulty < org:
            session['SL_changes_down'] += 1

        session['SL_difficulty'] = new_difficulty
        session['SL_difficulty_set_at'] = posix_time()

        # Report the changed difficulty
        Interfaces.reporter.difficulty_changed(session)

        # We need to restart submission collection to not mix shares with different
        # difficulty
        self._reset_submission_collection(session)
Example 23
    def _initialize_session(session, connection):
        if session.get('session_id'):
            raise Exception("Session is already initialized")

        session['connected_at'] = posix_time()
        session['subscribed_at'] = None
        session['disconnected_at'] = None
        session['session_id'] = Interfaces.ids_provider.get_new_session_id()
        session['authorized'] = {}
        session['client_ip'] = connection._get_ip()
        session['client_sw'] = None
        session['unauthorized_submits'] = 0

        # Share limiter's initialization of session attributes
        Interfaces.share_limiter.initialize_session(session)
        # Share manager's initialization of session attributes
        Interfaces.share_manager.initialize_session(session)
Example 24
    def on_submit_share(self, connection_ref, session):
        now = posix_time()

        # Remember the submit
        session['SL_submits'] += 1

        # If we reached planned recalculation time
        if now >= session['SL_last_recalculation_time'] + config.LIMITER_RECALCULATION_PERIOD_S:
            connection = connection_ref()
            if connection:
                self._recalculate(connection, session)
                # Remove the current connection from collection of conns. without submit
                try:
                    del self._no_submit_conn_refs[connection]
                except:
                    pass

        # Return difficulty of the just submitted shares
        return session['SL_difficulty']
Example 25
    def _simulation_step(self, first=False):
        try:
            now = posix_time()
            now_secs = int(now)
            for w in self._workers:
                if w['next_share_at'] <= now:
                    next_share_at = w['next_share_at'] +\
                                    self.get_next_share_wait_time(w['ghps'],
                                                                  w['difficulty'],
                                                                  w['deterministic'])
                    w['next_share_at'] = max(now, next_share_at)
                    if not first:
                        self._backend.on_submit_share(self._SESSION,
                                                      w['stats'],
                                                      w['difficulty'],
                                                      now_secs)
        except:
            log.error(traceback.format_exc())

        self._plan_simulation_step()
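get_next_share_wait_time is not included here. A plausible sketch, assuming a difficulty-1 share costs 2**32 hashes on average, so a worker hashing at ghps GH/s needs difficulty * 2**32 / (ghps * 1e9) seconds per share on average:

    import random

    def get_next_share_wait_time(self, ghps, difficulty, deterministic):
        # Hypothetical model: share finding is a Poisson process,
        # so the waits between shares are exponentially distributed
        mean_wait = difficulty * 2 ** 32 / (ghps * 1e9)
        if deterministic:
            return mean_wait
        return random.expovariate(1.0 / mean_wait)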
Example 26
    def on_submit_share(self, connection_ref, session):
        now = posix_time()

        # Remember the submit
        session['SL_submits'] += 1

        # If we reached planned recalculation time
        if now >= session[
                'SL_last_recalculation_time'] + config.LIMITER_RECALCULATION_PERIOD_S:
            connection = connection_ref()
            if connection:
                self._recalculate(connection, session)
                # Remove the current connection from collection of conns. without submit
                try:
                    del self._no_submit_conn_refs[connection]
                except:
                    pass

        # Return difficulty of the just submitted shares
        return session['SL_difficulty']
Example 27
    def initialize_session(self, session):
        if not session.get('SL_difficulty'):

            now = posix_time()
            # Initial difficulty for new connections - it depends on the server's state.
            # When the server has been accepting connections for a shorter time than the
            # configured period, a special default difficulty is applied to protect the
            # server from extreme submission rates from miners with improperly set
            # difficulty.
            if self._startup_time and \
                    now > self._startup_time + config.LIMITER_STARTUP_PERIOD_S:
                session['SL_difficulty'] = config.LIMITER_DEFAULT_DIFFICULTY
            else:
                session['SL_difficulty'] = config.LIMITER_DEFAULT_DIFFICULTY_STARTUP
            session['SL_difficulty_set_at'] = now

            session['SL_requested_difficulty'] = None
            session['SL_last_sent_difficulty'] = None

            # Statistical information
            session['SL_changes_up'] = 0
            session['SL_changes_down'] = 0

            self._reset_submission_collection(session)
Example 28
    def initialize_session(self, session):
        if not session.get('SL_difficulty'):

            now = posix_time()
            # Initial difficulty for new connections - it depends on the server's state.
            # When the server has been accepting connections for a shorter time than the
            # configured period, a special default difficulty is applied to protect the
            # server from extreme submission rates from miners with improperly set
            # difficulty.
            if self._startup_time and \
                    now > self._startup_time + config.LIMITER_STARTUP_PERIOD_S:
                session['SL_difficulty'] = config.LIMITER_DEFAULT_DIFFICULTY
            else:
                session[
                    'SL_difficulty'] = config.LIMITER_DEFAULT_DIFFICULTY_STARTUP
            session['SL_difficulty_set_at'] = now

            session['SL_requested_difficulty'] = None
            session['SL_last_sent_difficulty'] = None

            # Statistical information
            session['SL_changes_up'] = 0
            session['SL_changes_down'] = 0

            self._reset_submission_collection(session)
Example 29
    def disconnected(self):
        connection = self.connection_ref()
        session = connection.get_session()

        session['disconnected_at'] = posix_time()
        Interfaces.reporter.session_disconnected(session)
Example 30
 def _get_next_time(self):
     when = config.PREVHASH_REFRESH_INTERVAL - (posix_time() - self.registry.last_update) % config.PREVHASH_REFRESH_INTERVAL #@UndefinedVariable
     return when
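The modulo keeps the refresh schedule aligned to registry.last_update rather than to the call time. A worked example, assuming PREVHASH_REFRESH_INTERVAL = 5: with 12.3 s elapsed since the last update, when = 5 - (12.3 % 5) = 5 - 2.3 = 2.7 s until the next aligned tick.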
Example 31
 def _server_started(self, *events):
     self._startup_time = posix_time()
     log.info("Server started")
Example 32
    def _recalculate(self, connection, session):
        now = posix_time()
        result = False

        # Short period features
        submits = session['SL_submits']
        part_submits = session['SL_first_part_submits']
        secs = now - session['SL_start_time']
        part_secs = now - session['SL_first_part_end_time']

        submission_rate = float(submits) / secs
        secs = int(secs + 0.5)

        session['SL_last_recalculation_time'] = now
        session['SL_submission_rate'] = submission_rate

        # We completed half of the full collection period so we should restart it
        if part_secs >= self._HALF_COLLECTION_PERIOD:
            part_submits = submits - part_submits
            session['SL_submits'] = part_submits
            session['SL_first_part_submits'] = part_submits
            session['SL_start_time'] = session['SL_first_part_end_time']
            session['SL_first_part_end_time'] = now

        # A protection against index out of bounds (when a submit didn't arrive
        # for quite a long time and we're over the pre-computed period)
        if secs > self._PERIOD_WITH_COMPUTED_LIMITS:
            # Late submit means that the current submission rate is really low
            # and it is most probable that the current difficulty is already 1
            # and session workers are too weak. We then linearly approximate
            # submit count.
            submits = submits * self._PERIOD_WITH_COMPUTED_LIMITS / float(secs)
            secs = self._PERIOD_WITH_COMPUTED_LIMITS

        # Number of submits exceeds an expected and probable count
        if submits > self._MAX_SUBMITS_IN_TIME[secs]:
            result = (self._request_difficulty_by_rate(submission_rate,
                                                       connection, session,
                                                       config.LIMITER_CHANGE_RATIO_UP)
                      or result)

        # Number is lower than the expected and probable count
        if submits < self._MIN_SUBMITS_IN_TIME[secs] \
                and session['SL_difficulty'] > config.LIMITER_MINIMAL_DIFFICULTY:
            result = (self._request_difficulty_by_rate(submission_rate,
                                                       connection, session,
                                                       config.LIMITER_CHANGE_RATIO_DOWN)
                      or result)

        # If the current difficulty has been stable for long enough, try to fine-tune it
        if (now - session['SL_difficulty_set_at']) >= config.LIMITER_FINE_TUNE_AFTER_S \
            and (submission_rate >= self._FINE_TUNE_UPPER_RATE \
                 or submission_rate <= self._FINE_TUNE_LOWER_RATE):

            # Get moving average for short time hash rate (roughly minutes) and compute
            # estimated submission rate over the period (related to the current difficulty)
            submission_rate = float(Interfaces.share_manager.get_session_short_wsma(session)) / \
                                    session['SL_difficulty']
            if submission_rate >= self._FINE_TUNE_UPPER_RATE or submission_rate <= self._FINE_TUNE_LOWER_RATE:
                result = (self._request_difficulty_by_rate(submission_rate,
                                                           connection, session, 1)
                          or result)

        # There is no reason to change the difficulty based on submissions and we are
        # far enough from the subscription. In such a case we want to cancel the change
        # request. We block the cancellation right after subscription because of the
        # applied suggested difficulty and slow worker startup (there can be a
        # significant delay).
        if session['SL_requested_difficulty'] and not result \
                and (now - session['subscribed_at']) >= config.LIMITER_BLOCK_REQUEST_CANCEL_PERIOD_S:

            session['SL_requested_difficulty'] = None
            log.info("request cancelled for %s:%d (%s)" % \
                     (session['client_ip'], session['session_id'],
                      ",".join(session['authorized'].keys())))

        return result
Example 33
    def _recalculate(self, connection, session):
        now = posix_time()
        result = False

        # Short period features
        submits = session['SL_submits']
        part_submits = session['SL_first_part_submits']
        secs = now - session['SL_start_time']
        part_secs = now - session['SL_first_part_end_time']

        submission_rate = float(submits) / secs
        secs = int(secs + 0.5)

        session['SL_last_recalculation_time'] = now
        session['SL_submission_rate'] = submission_rate

        # We completed half of the full collection period so we should restart it
        if part_secs >= self._HALF_COLLECTION_PERIOD:
            part_submits = submits - part_submits
            session['SL_submits'] = part_submits
            session['SL_first_part_submits'] = part_submits
            session['SL_start_time'] = session['SL_first_part_end_time']
            session['SL_first_part_end_time'] = now

        # A protection against index out of bounds (when a submit didn't arrive
        # for quite a long time and we're over the pre-computed period)
        if secs > self._PERIOD_WITH_COMPUTED_LIMITS:
            # Late submit means that the current submission rate is really low
            # and it is most probable that the current difficulty is already 1
            # and session workers are too weak. We then linearly approximate
            # submit count.
            submits = submits * self._PERIOD_WITH_COMPUTED_LIMITS / float(secs)
            secs = self._PERIOD_WITH_COMPUTED_LIMITS

        # Number of submits exceeds an expected and probable count
        if submits > self._MAX_SUBMITS_IN_TIME[secs]:
            result = (self._request_difficulty_by_rate(
                submission_rate, connection, session,
                config.LIMITER_CHANGE_RATIO_UP) or result)

        # Number is lower than the expected and probable count
        if submits < self._MIN_SUBMITS_IN_TIME[secs] \
                and session['SL_difficulty'] > config.LIMITER_MINIMAL_DIFFICULTY:
            result = (self._request_difficulty_by_rate(
                submission_rate, connection, session,
                config.LIMITER_CHANGE_RATIO_DOWN) or result)

        # If the current difficulty has been stable for long enough, try to fine-tune it
        if (now - session['SL_difficulty_set_at']) >= config.LIMITER_FINE_TUNE_AFTER_S \
            and (submission_rate >= self._FINE_TUNE_UPPER_RATE \
                 or submission_rate <= self._FINE_TUNE_LOWER_RATE):

            # Get moving average for short time hash rate (roughly minutes) and compute
            # estimated submission rate over the period (related to the current difficulty)
            submission_rate = float(Interfaces.share_manager.get_session_short_wsma(session)) / \
                                    session['SL_difficulty']
            if submission_rate >= self._FINE_TUNE_UPPER_RATE or submission_rate <= self._FINE_TUNE_LOWER_RATE:
                result = (self._request_difficulty_by_rate(
                    submission_rate, connection, session, 1) or result)

        # There is no reason to change the difficulty based on submissions and we are
        # far enough from the subscription. In such a case we want to cancel the change
        # request. We block the cancellation right after subscription because of the
        # applied suggested difficulty and slow worker startup (there can be a
        # significant delay).
        if session['SL_requested_difficulty'] and not result \
                and (now - session['subscribed_at']) >= config.LIMITER_BLOCK_REQUEST_CANCEL_PERIOD_S:

            session['SL_requested_difficulty'] = None
            log.info("request cancelled for %s:%d (%s)" % \
                     (session['client_ip'], session['session_id'],
                      ",".join(session['authorized'].keys())))

        return result
Example 34
 def _report_simple_event(self, ev_name):
     events = self._simple_events[ev_name]
     if len(events) > 0:
         self._simple_events[ev_name] = []
         self._queue.deliver([ev_name, int(posix_time() * 1000), events]) #@UndefinedVariable