def initialize_worker_stats(self, session, worker_id, worker_name):
    now = posix_secs()
    self._last_worker_session_id += 1
    stats = {
        "session_id": session["session_id"],
        "worker_id": worker_id,
        "worker_name": worker_name,
        "worker_session_id": self._last_worker_session_id,
        "authorized_at": now,
        "stale_submits": 0,
        "old_submits": 0,
        "invalid_submits": 0,
        "valid_submits": 0,
        "valid_shares": 0,
        "last_valid_share": None,
        "wsma_prev_rate_short": None,
        "wsma_prev_rate_long": None,
        "wsma_start_time_short": now,
        "wsma_start_time_long": now,
        "wsma_shares_short": 0,
        "wsma_shares_long": 0,
        "wsma_rate_short": 0.0,
        "wsma_rate_long": 0.0,
    }
    session["SM_worker_stats"][worker_name] = stats
    return stats
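# A minimal sketch (an assumption, not code from this codebase) of how the
# "wsma" (windowed share moving average) fields initialized above might be
# advanced on each valid share: count shares inside a time window and, when
# the window elapses, blend the finished window's rate with the previous
# one. The helper name and window length below are hypothetical, for
# illustration only.
def _update_wsma_short(stats, now, window_secs=300):
    stats['wsma_shares_short'] += 1
    elapsed = now - stats['wsma_start_time_short']
    if elapsed >= window_secs:
        # Rate over the finished window, in shares per second
        rate = stats['wsma_shares_short'] / float(elapsed)
        if stats['wsma_prev_rate_short'] is None:
            stats['wsma_rate_short'] = rate
        else:
            # Smooth across the two most recent windows
            stats['wsma_rate_short'] = (rate + stats['wsma_prev_rate_short']) / 2.
        stats['wsma_prev_rate_short'] = rate
        stats['wsma_start_time_short'] = now
        stats['wsma_shares_short'] = 0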
def __init__(self, host, port, max_queue_len=3600, max_active_queue_len=5,
             retry_after_s=5, start_suspended=False):
    JsonReporter.__init__(self, host, port, max_queue_len=max_queue_len,
                          max_active_queue_len=max_active_queue_len,
                          retry_after_s=retry_after_s,
                          start_suspended=start_suspended)
    async_group = files.TimeBasedAsyncFileSequence(
        config.SHARE_TRACE_FILE_NAME_PATTERN,
        config.SHARE_TRACE_FILE_DIR, "a",
        period_length=3600,  # One-hour period
        auto_switch=False,   # We want to control the file switching
        buffer_limit=20000)
    self._file_group = async_group
    self._trace_file = async_group.get_async_file()
    task.LoopingCall(self._trace_file.flush_buf).start(3.0)
    self._last_push_period = posix_secs() - 1
def reset(self):
    self._statements_sent.clear()
    self._statements_ok.clear()
    self._statements_late.clear()
    self._statements_err.clear()
    self._timing.clear()
    self._last_late_statement_at = None
    self._reset_secs = posix_secs()
def __init__(self, late_delivery_limit=5):
    self._statements_sent = {}
    self._statements_ok = {}
    self._statements_late = {}
    self._statements_err = {}
    self._timing = {}
    self._last_late_statement_at = None
    self._late_delivery_limit = late_delivery_limit
    self._reset_secs = posix_secs()
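# posix_secs() is used throughout these snippets as a whole-second POSIX
# timestamp. A minimal compatible sketch (assumption: the real helper is
# defined elsewhere in the codebase):
import time

def posix_secs():
    """Current POSIX time truncated to whole seconds."""
    return int(time.time())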
def sess_stats(iterator):
    now = posix_secs()
    session_count = connection_time = worker_count = valid_shares = 0
    valid_submits = invalid_submits = stale_submits = old_submits = 0
    has_share = after_share_time = wsma_rate_short = wsma_rate_long = 0
    difficulty_up = difficulty_down = 0
    for session in iterator:
        if not session.get('subscribed_at', None):
            continue
        session_count += 1
        connection_time += (now - int(session['subscribed_at']))
        worker_count += len(session['SM_worker_stats'])
        difficulty_up += session['SL_changes_up']
        difficulty_down += session['SL_changes_down']
        last_share = None
        for ws in session['SM_worker_stats'].itervalues():
            valid_shares += ws['valid_shares']
            valid_submits += ws['valid_submits']
            invalid_submits += ws['invalid_submits']
            stale_submits += ws['stale_submits']
            old_submits += ws['old_submits']
            wsma_rate_short += ws['wsma_rate_short']
            wsma_rate_long += ws['wsma_rate_long']
            # stats['submits_cum'].append((ws['valid_submits'], ws['stale_submits'],
            #     ws['invalid_submits'],
            #     (ws['wsma_rate_long'] * (2 ** 32)) / (1000 * 1000 * 1000),
            #     session['SL_difficulty'], session['SL_changes_up'],
            #     session['SL_changes_down']))
            # Track the most recent valid share across the session's workers
            if ws['last_valid_share']:
                if last_share is None or ws['last_valid_share'] > last_share:
                    last_share = ws['last_valid_share']
        if last_share:
            has_share += 1
            after_share_time += (now - int(last_share))
    yield {
        'now': now,
        'sessions_count': session_count,
        'connection_time': connection_time,
        'worker_count': worker_count,
        'valid_shares': valid_shares,
        'valid_submits': valid_submits,
        'invalid_submits': invalid_submits,
        'stale_submits': stale_submits,
        'old_submits': old_submits,
        'difficulty_up': difficulty_up,
        'difficulty_down': difficulty_down,
        'wsma_ghps_short': wsma_rate_short * SHARE_RATE_TO_GHASH_PS,
        'wsma_ghps_long': wsma_rate_long * SHARE_RATE_TO_GHASH_PS,
    }
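# SHARE_RATE_TO_GHASH_PS converts an accepted-share rate (difficulty-1
# shares per second) into GH/s: each difficulty-1 share represents 2**32
# expected hashes, as the commented-out block above also shows. A sketch of
# a consistent definition (an assumption - the real constant lives
# elsewhere in the codebase):
SHARE_RATE_TO_GHASH_PS = (2 ** 32) / (1000. * 1000. * 1000.)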
def print_stats(cls):
    now = posix_secs()
    if cls.counter and (now < cls.log_secs + 3 or
                        float(cls.changes) / cls.counter < 0.05):
        # Print connection stats only when more than 5% of connections
        # have changed (and at most once per 3 seconds) to avoid log spam
        return
    log.info("%d peers connected, state changed %d times" %
             (cls.counter, cls.changes))
    cls.log_secs = now
    cls.changes = 0
def push_shares(self):
    """Called by PushWatchdog when we need to push buffered shares to the db"""
    secs = posix_secs()
    self._db_share_backend.push_shares(secs)
    self._sink_share_backend.push_shares(secs)
    # Sometimes the notification is faster than the database update
    # (because of an old pool?)
    self.load_block_info()
def start_simulation(self):
    self._backend = Interfaces.share_manager.get_db_share_backend()
    now_secs = posix_secs()
    for w in self._workers:
        w['stats'] = {
            'worker_id': w['worker_id'],
            'worker_session_id': -w['rank'],
        }
        w['next_share_at'] = now_secs
    self._simulation_step(first=True)
    log.info('Simulation started')
def __init__(self):
    self._shares = {}
    self._shares_count = 0  # Counter of shares, just for stats
    # Slot-push related members
    self._current_slot_num_to_push = 0
    self._slot_score = 0.0  # Score not yet pushed in the current slot
    self._current_second_facts = {
        'secs': posix_secs() - 1,
        'score': 1.,
    }
    self._block_id = None
    self._block_started_secs = None
def push_block(self, worker_id, submit_secs, block_hash, block_height,
               difficulty, value):
    if not config.SHARE_SINK_ENABLED and not config.SHARE_TRACE_ENABLED:
        return
    block = (block_hash, block_height, submit_secs, value, difficulty, worker_id)
    timestamp = posix_secs()
    event = ['BLOCK', timestamp, block]
    if config.SHARE_TRACE_ENABLED:
        # Ensure the record is going to the right file
        self._file_group.check_switch(timestamp)
        self._trace_file.write_buf(json.dumps(event))
        self._trace_file.write_buf('\n')
    if config.SHARE_SINK_ENABLED:
        self.deliver(event)
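# For illustration, a trace-file line produced by push_block() would look
# like the following JSON (all values below are made up):
#
#   ["BLOCK", 1397126405,
#    ["0000000000000000...", 296350, 1397126404, 2512398765, 1024, 42]]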
def __init__(self):
    # Current share-sink pushing period
    self._curr_push_period = posix_secs()
    # Share-sink current push set - data collected during the current
    # collection period
    self._push_set = {}
    self._last_nonempty_push_period = None
def submit(self, worker_name, job_id, extranonce2, ntime, nonce, *args):
    """Try to solve block candidate using given parameters."""
    job_id = int(job_id.lower(), 16)
    extranonce2 = extranonce2.lower()
    ntime = ntime.lower()
    nonce = nonce.lower()

    conn = self.connection_ref()
    session = conn.get_session()

    # Check if extranonce1 is in the connection session
    extranonce1_bin = session.get('extranonce1', None)
    if not extranonce1_bin:
        raise SubmitException("Connection is not subscribed for mining")

    # Check if the worker is authorized to submit shares
    if not Interfaces.worker_manager.authorize(worker_name, None):
        try:
            session['unauthorized_submits'] += 1
        except KeyError:
            session['unauthorized_submits'] = 1
        raise SubmitException("Worker is not authorized")

    worker_stats = Interfaces.share_manager.get_worker_stats(session, worker_name)

    # Get the lowest job_id valid for the current block
    block_min_job_id = Interfaces.template_registry.get_block_min_job_id()

    # Stale share (hard) - the referenced job is no longer a valid job
    # (not a chain head)
    if job_id < block_min_job_id:
        log.info("Stale share, job %s, block_min_job_id %s for %s %s" %
                 (job_id, block_min_job_id, worker_name, conn.get_ident()))
        worker_stats['stale_submits'] += 1
        Interfaces.reporter.worker_stats_changed(worker_stats)
        raise SubmitException("Stale share")

    # Check if the job_id is too old for the current difficulty (vardiff)
    if job_id < session['min_job_id']:
        worker_stats['old_submits'] += 1
        Interfaces.reporter.worker_stats_changed(worker_stats)
        # Try to process the share, but take it with the minimal difficulty
        # (there is no easy and cheap way to get the correct difficulty)
        # - it is a kind of stale share anyway
        difficulty = config.LIMITER_MINIMAL_DIFFICULTY
    else:
        # Notify the share limiter about the new submit and ask for
        # the submit's difficulty
        difficulty = Interfaces.share_limiter.on_submit_share(self.connection_ref,
                                                              session)

    # Take a timestamp for this submit
    submit_secs = posix_secs()

    # Check that the submitted share meets all requirements
    # and is a valid proof of work
    try:
        (block_header, block_hash, block_value, on_submit) = \
            Interfaces.template_registry.submit_share(job_id, worker_name,
                                                      extranonce1_bin, extranonce2,
                                                      ntime, nonce, difficulty)
    except:
        # We pass the share on as NOT valid (e.g. so it can be logged)
        Interfaces.share_manager.on_invalid_submit_share(session, worker_stats,
                                                         difficulty, job_id)
        # An invalid submit should lead to an exception propagated to the client
        raise

    # Register the submitted share and (later) propagate it to the database
    Interfaces.share_manager.on_submit_share(session, worker_stats,
                                             difficulty, submit_secs)

    if on_submit is not None:
        # The pool performs submitblock() to bitcoind. Let's hook
        # into the result and report it to the share manager
        on_submit.addCallback(Interfaces.share_manager.on_submit_block,
                              worker_stats, block_header, block_hash,
                              submit_secs, block_value)

    return True
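# For reference, the standard Stratum mining.submit request this method
# handles looks like the following (values are illustrative):
#
#   {"id": 4, "method": "mining.submit",
#    "params": ["worker.1", "bf", "00000001", "504e86ed", "b2957c02"]}
#
# params: worker_name, job_id (hex, parsed with int(..., 16) above),
# extranonce2, ntime, nonce.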