def test_stratum_confirm(self):
    """ Test some raw data from cgminer submitting a share; confirm the
    hashes come out the same as cgminer computed them.

    The getblocktemplate result below was captured from a live daemon;
    the share (`extra1`, `submit`) was produced by cgminer against this
    exact template.
    """
    gbt = {u'bits': u'1e00e92b',
           u'coinbaseaux': {u'flags': u'062f503253482f'},
           u'coinbasevalue': 5000000000,
           u'curtime': 1392509565,
           u'height': 203588,
           u'mintime': 1392508633,
           u'mutable': [u'time', u'transactions', u'prevblock'],
           u'noncerange': u'00000000ffffffff',
           u'previousblockhash': u'b0f5ecb62774f2f07fdc0f72fa0585ae3e8ca78ad8692209a355d12bc690fb73',
           u'sigoplimit': 20000,
           u'sizelimit': 1000000,
           u'target': u'000000e92b000000000000000000000000000000000000000000000000000000',
           u'transactions': [],
           u'version': 2}
    extra1 = '0000000000000000'
    submit = {'extra2': '00000000',
              'nonce': 'd5160000',
              'result': '000050ccfe8a3efe93b2ee33d2aecf4a60c809995c7dd19368a7d00c86880f30'}

    # build a block template object from the raw data
    coinbase = Transaction()
    coinbase.version = 2
    coinbase.inputs.append(Input.coinbase(gbt['height'], b'\0' * 12))
    coinbase.outputs.append(
        Output.to_address(gbt['coinbasevalue'],
                          'D7QJyeBNuwEqxsyVCLJi3pHs64uPdMDuBa'))
    transactions = []
    for trans in gbt['transactions']:
        new_trans = Transaction(unhexlify(trans['data']), fees=trans['fee'])
        # round-tripping the raw tx must reproduce the daemon's hash
        assert trans['hash'] == new_trans.lehexhash
        transactions.append(new_trans)
    bt = BlockTemplate.from_gbt(gbt, coinbase, 12, transactions)
    send_params = bt.stratum_params()
    print("job_id: {0}\nprevhash: {1}\ncoinbase1: {2}\ncoinbase2: {3}"
          "\nmerkle_branch: {4}\nversion: {5}\nnbits: {6}\nntime: {7}"
          .format(*send_params))

    # reassemble the header exactly as the miner saw it and hash it
    header = bt.block_header(submit['nonce'], extra1, submit['extra2'])
    hash_bin = scrypt(header)
    target = target_from_diff(1, 0x0000FFFF00000000000000000000000000000000000000000000000000000000)
    hash_int = uint256_from_str(hash_bin)
    hash_hex = "%064x" % hash_int
    # assertEqual: the assertEquals spelling is a deprecated alias
    self.assertEqual(hash_hex, submit['result'])
    assert hash_int < target
def test_target_from_diff(self):
    """ A difficulty of one must yield the full diff-1 target. """
    # NOTE: the old comment claimed "a difficulty of zero", but the call
    # passes 1 — the comment was wrong, not the code.
    # assertEqual: the assertEquals spelling is a deprecated alias
    self.assertEqual(
        target_from_diff(1),
        0x00000000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
def submit_job(self, data):
    """ Handles receiving a work submission and checking that it is valid:
    stale, duplicate and low-difficulty shares are rejected; accepted
    shares are acked and counted, and a share meeting network difficulty
    is submitted as a block to every live daemon connection.

    data['params'] layout: [worker_name, job_id, extranonce2, ntime, nonce]
    Returns one of the outcome constants (STALE_SHARE, DUP_SHARE,
    LOW_DIFF, VALID_SHARE, BLOCK_FOUND).
    """
    params = data['params']
    # [worker_name, job_id, extranonce2, ntime, nonce]
    # ["slush.miner1", "bf", "00000001", "504e86ed", "b2957c02"]
    self.logger.debug(
        "Recieved work submit:\n\tworker_name: {0}\n\t"
        "job_id: {1}\n\textranonce2: {2}\n\t"
        "ntime: {3}\n\tnonce: {4} ({int_nonce})"
        .format(
            *params,
            int_nonce=unpack(str("<L"), unhexlify(params[4]))))

    # resolve job id -> (difficulty, job); a miss on either dict means
    # the job was wiped (new block) and the share is stale
    try:
        difficulty, jobid = self.job_mapper[data['params'][1]]
        job = self.net_state['jobs'][jobid]
    except KeyError:
        # stale job
        self.send_error(self.STALE_SHARE)
        # TODO: should really try to use the correct diff
        self.server_state['reject_stale'].incr(self.difficulty)
        return self.STALE_SHARE

    # assemble the full block header bytestring for this submission
    header = job.block_header(
        nonce=params[4],
        extra1=self.id,
        extra2=params[2],
        ntime=params[3])

    # Check a submitted share against previous shares to eliminate
    # duplicates
    share = (self.id, params[2], params[4])
    if share in job.acc_shares:
        self.logger.info("Duplicate share rejected from worker {}.{}!"
                         .format(self.address, self.worker))
        self.send_error(self.DUP_SHARE)
        self.server_state['reject_dup'].incr(difficulty)
        return self.DUP_SHARE

    # reject shares that don't meet the client's assigned difficulty
    job_target = target_from_diff(difficulty, self.config['diff1'])
    hash_int = self.config['pow_func'](header)
    if hash_int >= job_target:
        self.logger.info("Low diff share rejected from worker {}.{}!"
                         .format(self.address, self.worker))
        self.send_error(self.LOW_DIFF)
        self.server_state['reject_low'].incr(difficulty)
        return self.LOW_DIFF

    # we want to send an ack ASAP, so do it here
    self.send_success(self.msg_id)
    self.logger.info("Valid share accepted from worker {}.{}!"
                     .format(self.address, self.worker))
    # Add the share to the accepted set to check for dups
    job.acc_shares.add(share)
    self.server_state['shares'].incr(difficulty)
    self.celery.send_task_pp('add_share', self.address, difficulty)

    # valid network hash? if not, we're done — it was just a pool share
    if hash_int >= job.bits_target:
        return self.VALID_SHARE

    # best-effort logging of the solved block; never let a logging
    # failure stop block submission below
    try:
        self.logger.log(35, "Valid network block identified!")
        self.logger.info("New block at height %i"
                         % self.net_state['current_height'])
        self.logger.info("Block coinbase hash %s"
                         % job.coinbase.lehexhash)
        block = hexlify(job.submit_serial(header))
        self.logger.log(35, "New block hex dump:\n{}".format(block))
        self.logger.log(35, "Coinbase: {}".format(str(job.coinbase.to_dict())))
        for trans in job.transactions:
            self.logger.log(35, str(trans.to_dict()))
    except Exception:
        # because I'm paranoid...
        self.logger.error("Unexcpected exception in block logging!",
                          exc_info=True)

    def submit_block(conn):
        # push the serialized block to one daemon, retrying up to 5 times
        retries = 0
        while retries < 5:
            try:
                res = conn.submitblock(block)
            except (CoinRPCException, socket.error, ValueError) as e:
                self.logger.error("Block failed to submit to the server {}!"
                                  .format(conn.name), exc_info=True)
                self.logger.error(getattr(e, 'error'))
            else:
                # submitblock returns None on acceptance
                if res is None:
                    hash_hex = hexlify(
                        sha256(sha256(header).digest()).digest()[::-1])
                    self.celery.send_task_pp(
                        'add_block',
                        self.address,
                        self.net_state['current_height'] + 1,
                        job.total_value,
                        job.fee_total,
                        hexlify(job.bits),
                        hash_hex)
                    self.logger.info("NEW BLOCK ACCEPTED by {}!!!"
                                     .format(conn.name))
                    self.server_state['block_solve'] = int(time())
                    break  # break retry loop if success
                else:
                    self.logger.error(
                        "Block failed to submit to the server {}, "
                        "server returned {}!".format(conn.name, res),
                        exc_info=True)
            retries += 1
            sleep(1)
            self.logger.info("Retry {} for connection {}".format(retries, conn.name))

    for conn in self.net_state['live_connections']:
        # spawn a new greenlet for each submission to do them all async.
        # lower orphan chance
        spawn(submit_block, conn)

    return self.BLOCK_FOUND
def submit_job(self, data, t):
    """ Handles receiving a work submission and checking that it is valid:
    stale, duplicate and low-difficulty shares are rejected; everything
    else is acked and logged to the reporter. Sends the stratum reply to
    the client.

    `data` is the decoded stratum message; `t` is a start timestamp
    forwarded to reporter.log_share (presumably for latency accounting —
    TODO confirm against caller).
    Returns a (difficulty, outcome-constant) tuple.
    """
    params = data['params']
    # [worker_name, job_id, extranonce2, ntime, nonce]
    # ["slush.miner1", "bf", "00000001", "504e86ed", "b2957c02"]
    if __debug__:
        # guarded so the format/unpack work is skipped entirely under -O
        self.logger.debug(
            "Recieved work submit:\n\tworker_name: {0}\n\t"
            "job_id: {1}\n\textranonce2: {2}\n\t"
            "ntime: {3}\n\tnonce: {4} ({int_nonce})"
            .format(
                *params,
                int_nonce=struct.unpack(str("<L"), unhexlify(params[4]))))

    # any submission counts as activity — clear the idle flag
    if self.idle:
        self.idle = False
        self.server.idle_clients -= 1

    self.last_share_submit = time.time()

    # resolve job id -> (difficulty, job). Jobs are held by weakref, so
    # even on a dict hit `job()` can yield None if the job was collected.
    try:
        difficulty, job = self.job_mapper[data['params'][1]]
        job = job()  # weakref will be none if it's been GCed
    except KeyError:
        try:
            difficulty, job = self.old_job_mapper[data['params'][1]]
            job = job()  # weakref will be none if it's been GCed
        except KeyError:
            job = None  # Job not in jobmapper at all, we got a bogus submit
            # since we can't identify the diff we just have to assume it's
            # current diff
            difficulty = self.difficulty

    if job is None:
        # stale (or bogus) submission
        self.send_error(self.STALE_SHARE_ERR, id_val=data['id'])
        self.reporter.log_share(client=self,
                                diff=self.difficulty,
                                typ=self.STALE_SHARE,
                                params=params,
                                start=t)
        return difficulty, self.STALE_SHARE

    # assemble a complete block header bytestring
    header = job.block_header(
        nonce=params[4],
        extra1=self._id,
        extra2=params[2],
        ntime=params[3])

    # Check a submitted share against previous shares to eliminate
    # duplicates
    share = (self._id, params[2], params[4], params[3])
    if share in job.acc_shares:
        self.logger.info("Duplicate share rejected from worker {}.{}!"
                         .format(self.address, self.worker))
        self.send_error(self.DUP_SHARE_ERR, id_val=data['id'])
        self.reporter.log_share(client=self, diff=difficulty,
                                typ=self.DUP_SHARE, params=params,
                                job=job, start=t)
        return difficulty, self.DUP_SHARE

    # reject shares that don't meet the client's assigned difficulty
    job_target = target_from_diff(difficulty, job.diff1)
    hash_int = uint256_from_str(self.algo['module'](header))
    if hash_int >= job_target:
        self.logger.info("Low diff share rejected from worker {}.{}!"
                         .format(self.address, self.worker))
        self.send_error(self.LOW_DIFF_ERR, id_val=data['id'])
        self.reporter.log_share(client=self, diff=difficulty,
                                typ=self.LOW_DIFF_SHARE, params=params,
                                job=job, start=t)
        return difficulty, self.LOW_DIFF_SHARE

    # we want to send an ack ASAP, so do it here
    self.send_success(id_val=data['id'])

    # Add the share to the accepted set to check for dups
    job.acc_shares.add(share)
    self.accepted_shares += difficulty
    self.reporter.log_share(client=self, diff=difficulty,
                            typ=self.VALID_SHARE, params=params, job=job,
                            header_hash=hash_int, header=header, start=t)

    return difficulty, self.VALID_SHARE
def test_stratum_confirm(self):
    """ Test some raw data from cgminer submitting a share; confirm the
    hashes come out the same as cgminer computed them.

    The getblocktemplate result below was captured from a live daemon;
    the share (`extra1`, `submit`) was produced by cgminer against this
    exact template.
    """
    gbt = {
        u'bits': u'1e00e92b',
        u'coinbaseaux': {
            u'flags': u'062f503253482f'
        },
        u'coinbasevalue': 5000000000,
        u'curtime': 1392509565,
        u'height': 203588,
        u'mintime': 1392508633,
        u'mutable': [u'time', u'transactions', u'prevblock'],
        u'noncerange': u'00000000ffffffff',
        u'previousblockhash':
        u'b0f5ecb62774f2f07fdc0f72fa0585ae3e8ca78ad8692209a355d12bc690fb73',
        u'sigoplimit': 20000,
        u'sizelimit': 1000000,
        u'target':
        u'000000e92b000000000000000000000000000000000000000000000000000000',
        u'transactions': [],
        u'version': 2
    }
    extra1 = '0000000000000000'
    submit = {
        'extra2': '00000000',
        'nonce': 'd5160000',
        'result': '000050ccfe8a3efe93b2ee33d2aecf4a60c809995c7dd19368a7d00c86880f30'
    }

    # build a block template object from the raw data
    coinbase = Transaction()
    coinbase.version = 2
    coinbase.inputs.append(Input.coinbase(gbt['height'], [b'\0' * 12]))
    coinbase.outputs.append(
        Output.to_address(gbt['coinbasevalue'],
                          'D7QJyeBNuwEqxsyVCLJi3pHs64uPdMDuBa'))
    transactions = []
    for trans in gbt['transactions']:
        new_trans = Transaction(unhexlify(trans['data']), fees=trans['fee'])
        # round-tripping the raw tx must reproduce the daemon's hash
        assert trans['hash'] == new_trans.lehexhash
        transactions.append(new_trans)
    bt = BlockTemplate.from_gbt(gbt, coinbase, 12, transactions)
    send_params = bt.stratum_params()
    print(
        "job_id: {0}\nprevhash: {1}\ncoinbase1: {2}\ncoinbase2: {3}"
        "\nmerkle_branch: {4}\nversion: {5}\nnbits: {6}\nntime: {7}".
        format(*send_params))

    # reassemble the header exactly as the miner saw it and hash it
    header = bt.block_header(submit['nonce'], extra1, submit['extra2'])
    target = target_from_diff(
        1, 0x0000FFFF00000000000000000000000000000000000000000000000000000000)
    # BUG FIX: hash_int was referenced in the final assert but never
    # defined in this revision (NameError). Compute the hash once and
    # derive both the hex form and the integer form from it.
    hash_hex = hexlify(sha256d(header)[::-1]).decode('ascii')
    # assertEqual: the assertEquals spelling is a deprecated alias
    self.assertEqual(hash_hex, submit['result'])
    hash_int = int(hash_hex, 16)
    assert hash_int < target
def submit_job(self, data):
    """ Handles receiving a work submission and checking that it is valid:
    stale, duplicate and low-difficulty shares are rejected; accepted
    shares are acked and counted. Shares meeting the main-chain or any
    aux-chain target hand off block submission to the jobmanager via
    greenlets.

    data['params'] layout: [worker_name, job_id, extranonce2, ntime, nonce]
    Returns an (outcome-constant, difficulty) tuple.
    """
    start = time.time()
    params = data['params']
    # [worker_name, job_id, extranonce2, ntime, nonce]
    # ["slush.miner1", "bf", "00000001", "504e86ed", "b2957c02"]
    if __debug__:
        # guarded so the format/unpack work is skipped entirely under -O
        self.logger.debug(
            "Recieved work submit:\n\tworker_name: {0}\n\t"
            "job_id: {1}\n\textranonce2: {2}\n\t"
            "ntime: {3}\n\tnonce: {4} ({int_nonce})"
            .format(
                *params,
                int_nonce=struct.unpack(str("<L"), unhexlify(params[4]))))

    # any submission counts as activity — clear the idle flag
    if self.idle:
        self.idle = False
        self.stratum_manager.idle_clients -= 1

    self.last_share_submit = time.time()

    try:
        difficulty, jobid = self.job_mapper[data['params'][1]]
    except KeyError:
        # since we can't identify the diff we just have to assume it's
        # current diff
        self.send_error(self.STALE_SHARE_ERR, id_val=self.msg_id)
        self.server['reject_stale'].incr(self.difficulty)
        self.server['reject_stale_shares'].incr()
        return self.STALE_SHARE, self.difficulty

    # lookup the job in the global job dictionary. If it's gone from here
    # then a new block was announced which wiped it
    try:
        job = self.jobmanager.jobs[jobid]
    except KeyError:
        self.send_error(self.STALE_SHARE_ERR, id_val=self.msg_id)
        self.server['reject_stale'].incr(difficulty)
        self.server['reject_stale_shares'].incr()
        return self.STALE_SHARE, difficulty

    # assemble a complete block header bytestring
    header = job.block_header(
        nonce=params[4],
        extra1=self.id,
        extra2=params[2],
        ntime=params[3])

    # Grab the raw coinbase out of the job object before gevent can preempt
    # to another thread and change the value. Very important!
    coinbase_raw = job.coinbase.raw

    # Check a submitted share against previous shares to eliminate
    # duplicates
    share = (self.id, params[2], params[4], params[3])
    if share in job.acc_shares:
        self.logger.info("Duplicate share rejected from worker {}.{}!"
                         .format(self.address, self.worker))
        self.send_error(self.DUP_SHARE_ERR, id_val=self.msg_id)
        self.server['reject_dup'].incr(difficulty)
        self.server['reject_dup_shares'].incr()
        return self.DUP_SHARE, difficulty

    # reject shares that don't meet the client's assigned difficulty
    job_target = target_from_diff(difficulty, job.diff1)
    hash_int = uint256_from_str(self.algos[job.algo](header))
    if hash_int >= job_target:
        self.logger.info("Low diff share rejected from worker {}.{}!"
                         .format(self.address, self.worker))
        self.send_error(self.LOW_DIFF_ERR, id_val=self.msg_id)
        self.server['reject_low'].incr(difficulty)
        self.server['reject_low_shares'].incr()
        return self.LOW_DIFF, difficulty

    # we want to send an ack ASAP, so do it here
    self.send_success(id_val=self.msg_id)
    self.logger.debug("Valid share accepted from worker {}.{}!"
                      .format(self.address, self.worker))

    # Add the share to the accepted set to check for dups
    job.acc_shares.add(share)
    self.server['valid'].incr(difficulty)
    self.server['valid_shares'].incr()

    # Some coins use POW function to do blockhash, while others use SHA256.
    # Allow toggling
    if job.pow_block_hash:
        header_hash = self.algos[job.algo](header)[::-1]
    else:
        header_hash = sha256(sha256(header).digest()).digest()[::-1]
    hash_hex = hexlify(header_hash)

    # valid network hash? submit asynchronously so the stratum reply
    # isn't delayed by the daemon RPC
    if hash_int <= job.bits_target:
        spawn(self.jobmanager.found_block,
              coinbase_raw,
              self.address,
              self.worker,
              hash_hex,
              header,
              job.job_id,
              start)
        outcome = self.BLOCK_FOUND
    else:
        outcome = self.VALID_SHARE

    # check each aux chain for validity
    # NOTE(review): the loop variable `data` shadows the submit payload
    # parameter; the payload is no longer needed at this point.
    for chain_id, data in job.merged_data.iteritems():
        if hash_int <= data['target']:
            spawn(self.jobmanager.found_merged_block,
                  self.address,
                  self.worker,
                  header,
                  job.job_id,
                  coinbase_raw,
                  data['type'])

    return outcome, difficulty
def submit_job(self, data, t):
    """ Handles receiving a work submission and checking that it is valid:
    stale, duplicate and low-difficulty shares are rejected; everything
    else is acked, counted and logged to the reporter.

    `data` is the decoded stratum message; `t` is a start timestamp
    forwarded to reporter.log_share (presumably for latency accounting —
    TODO confirm against caller).
    Returns a (difficulty, outcome-constant) tuple.
    """
    params = data['params']
    # [worker_name, job_id, extranonce2, ntime, nonce]
    # ["slush.miner1", "bf", "00000001", "504e86ed", "b2957c02"]
    if __debug__:
        # guarded so the format/unpack work is skipped entirely under -O
        self.logger.debug("Recieved work submit:\n\tworker_name: {0}\n\t"
                          "job_id: {1}\n\textranonce2: {2}\n\t"
                          "ntime: {3}\n\tnonce: {4} ({int_nonce})".format(
                              *params,
                              int_nonce=struct.unpack(
                                  str("<L"), unhexlify(params[4]))))

    # any submission counts as activity — clear the idle flag
    if self.idle:
        self.idle = False
        self.server.idle_clients -= 1

    self.last_share_submit = time.time()

    # resolve job id -> (difficulty, job). Jobs are held by weakref, so
    # even on a dict hit `job()` can yield None if the job was collected.
    try:
        difficulty, job = self.job_mapper[data['params'][1]]
        job = job()  # weakref will be None if the job has been GCed
    except KeyError:
        try:
            difficulty, job = self.old_job_mapper[data['params'][1]]
            job = job()  # weakref will be None if the job has been GCed
        except KeyError:
            job = None  # Job not in jobmapper at all, we got a bogus submit
            # since we can't identify the diff we just have to assume it's
            # current diff
            difficulty = self.difficulty

    # a GCed (None) or retired job won't be in the active set -> stale
    if job not in self.server.active_jobs:
        self.send_error(self.STALE_SHARE_ERR, id_val=data['id'])
        self.counter['stale'] += 1
        self.reporter.log_share(client=self,
                                diff=self.difficulty,
                                typ=self.STALE_SHARE,
                                params=params,
                                job=job,
                                start=t)
        return difficulty, self.STALE_SHARE

    # assemble a complete block header bytestring
    header = job.block_header(nonce=params[4],
                              extra1=self._id,
                              extra2=params[2],
                              ntime=params[3])

    # Check a submitted share against previous shares to eliminate
    # duplicates; lowercased so hex-case differences can't evade the check
    share_lower = (self._id.lower(), params[2].lower(),
                   params[4].lower(), params[3].lower())
    if share_lower in job.acc_shares:
        self.logger.info(
            "Duplicate share rejected from worker {}.{}!".format(
                self.address, self.worker))
        self.send_error(self.DUP_SHARE_ERR, id_val=data['id'])
        self.counter['duplicate'] += 1
        self.reporter.log_share(client=self,
                                diff=difficulty,
                                typ=self.DUP_SHARE,
                                params=params,
                                job=job,
                                start=t)
        return difficulty, self.DUP_SHARE

    # reject shares that don't meet the client's assigned difficulty
    job_target = target_from_diff(difficulty, job.diff1)
    hash_int = uint256_from_str(self.algo['module'](header))
    if hash_int >= job_target:
        self.logger.info(
            "Low diff share rejected from worker {}.{}!".format(
                self.address, self.worker))
        self.send_error(self.LOW_DIFF_ERR, id_val=data['id'])
        # NOTE(review): 'LowDiff' casing is inconsistent with the other
        # counter keys ('stale', 'duplicate', 'accepted') — confirm
        # downstream consumers before normalizing.
        self.counter['LowDiff'] += 1
        self.reporter.log_share(client=self,
                                diff=difficulty,
                                typ=self.LOW_DIFF_SHARE,
                                params=params,
                                job=job,
                                start=t)
        return difficulty, self.LOW_DIFF_SHARE

    # we want to send an ack ASAP, so do it here
    self.send_success(id_val=data['id'])

    # Add the share to the accepted set to check for dups
    job.acc_shares.add(share_lower)
    self.counter['accepted'] += 1
    # normalize the share difficulty relative to the standard bitcoin
    # diff-1 target before accumulating (assumed — TODO confirm the
    # accounting units with the reporter/jobmanager consumers)
    multi = float(
        job.diff1
    ) / 0x00000000FFFF0000000000000000000000000000000000000000000000000000
    self.accepted_shares += difficulty / multi
    self.server.jobmanager.current_accepted_shares += difficulty / multi
    self.reporter.log_share(client=self,
                            diff=difficulty,
                            typ=self.VALID_SHARE,
                            params=params,
                            job=job,
                            header_hash=hash_int,
                            header=header,
                            start=t)

    return difficulty, self.VALID_SHARE
def submit_job(self, data):
    """ Handles receiving a work submission and checking that it is valid:
    stale, duplicate and low-difficulty shares are rejected; accepted
    shares are acked and counted. Shares meeting the main-chain or any
    aux-chain target hand off block submission to the jobmanager via
    greenlets.

    data['params'] layout: [worker_name, job_id, extranonce2, ntime, nonce]
    Returns an (outcome-constant, difficulty) tuple.
    """
    start = time.time()
    params = data['params']
    # [worker_name, job_id, extranonce2, ntime, nonce]
    # ["slush.miner1", "bf", "00000001", "504e86ed", "b2957c02"]
    if __debug__:
        # guarded so the format/unpack work is skipped entirely under -O
        self.logger.debug("Recieved work submit:\n\tworker_name: {0}\n\t"
                          "job_id: {1}\n\textranonce2: {2}\n\t"
                          "ntime: {3}\n\tnonce: {4} ({int_nonce})".format(
                              *params,
                              int_nonce=struct.unpack(
                                  str("<L"), unhexlify(params[4]))))

    # any submission counts as activity — clear the idle flag
    if self.idle:
        self.idle = False
        self.stratum_manager.idle_clients -= 1

    self.last_share_submit = time.time()

    try:
        difficulty, jobid = self.job_mapper[data['params'][1]]
    except KeyError:
        # since we can't identify the diff we just have to assume it's
        # current diff
        self.send_error(self.STALE_SHARE_ERR, id_val=self.msg_id)
        self.server['reject_stale'].incr(self.difficulty)
        self.server['reject_stale_shares'].incr()
        return self.STALE_SHARE, self.difficulty

    # lookup the job in the global job dictionary. If it's gone from here
    # then a new block was announced which wiped it
    try:
        job = self.jobmanager.jobs[jobid]
    except KeyError:
        self.send_error(self.STALE_SHARE_ERR, id_val=self.msg_id)
        self.server['reject_stale'].incr(difficulty)
        self.server['reject_stale_shares'].incr()
        return self.STALE_SHARE, difficulty

    # assemble a complete block header bytestring
    header = job.block_header(nonce=params[4],
                              extra1=self.id,
                              extra2=params[2],
                              ntime=params[3])

    # Grab the raw coinbase out of the job object before gevent can preempt
    # to another thread and change the value. Very important!
    coinbase_raw = job.coinbase.raw

    # Check a submitted share against previous shares to eliminate
    # duplicates
    share = (self.id, params[2], params[4], params[3])
    if share in job.acc_shares:
        self.logger.info(
            "Duplicate share rejected from worker {}.{}!".format(
                self.address, self.worker))
        self.send_error(self.DUP_SHARE_ERR, id_val=self.msg_id)
        self.server['reject_dup'].incr(difficulty)
        self.server['reject_dup_shares'].incr()
        return self.DUP_SHARE, difficulty

    # reject shares that don't meet the client's assigned difficulty
    job_target = target_from_diff(difficulty, job.diff1)
    hash_int = uint256_from_str(self.algos[job.algo](header))
    if hash_int >= job_target:
        self.logger.info(
            "Low diff share rejected from worker {}.{}!".format(
                self.address, self.worker))
        self.send_error(self.LOW_DIFF_ERR, id_val=self.msg_id)
        self.server['reject_low'].incr(difficulty)
        self.server['reject_low_shares'].incr()
        return self.LOW_DIFF, difficulty

    # we want to send an ack ASAP, so do it here
    self.send_success(id_val=self.msg_id)
    self.logger.debug("Valid share accepted from worker {}.{}!".format(
        self.address, self.worker))

    # Add the share to the accepted set to check for dups
    job.acc_shares.add(share)
    self.server['valid'].incr(difficulty)
    self.server['valid_shares'].incr()

    # Some coins use POW function to do blockhash, while others use SHA256.
    # Allow toggling
    if job.pow_block_hash:
        header_hash = self.algos[job.algo](header)[::-1]
    else:
        header_hash = sha256(sha256(header).digest()).digest()[::-1]
    hash_hex = hexlify(header_hash)

    # valid network hash? submit asynchronously so the stratum reply
    # isn't delayed by the daemon RPC
    if hash_int <= job.bits_target:
        spawn(self.jobmanager.found_block,
              coinbase_raw,
              self.address,
              self.worker,
              hash_hex,
              header,
              job.job_id,
              start)
        outcome = self.BLOCK_FOUND
    else:
        outcome = self.VALID_SHARE

    # check each aux chain for validity
    # NOTE(review): the loop variable `data` shadows the submit payload
    # parameter; the payload is no longer needed at this point.
    for chain_id, data in job.merged_data.iteritems():
        if hash_int <= data['target']:
            spawn(self.jobmanager.found_merged_block,
                  self.address,
                  self.worker,
                  header,
                  job.job_id,
                  coinbase_raw,
                  data['type'])

    return outcome, difficulty
def submit_job(self, data):
    """ Handles receiving a work submission and checking that it is valid:
    stale, duplicate and low-difficulty shares are rejected; accepted
    shares are acked and counted. A share meeting an aux-chain target is
    submitted to the merged-mining daemon; one meeting the main network
    target is submitted to every live daemon connection.

    data['params'] layout: [worker_name, job_id, extranonce2, ntime, nonce]
    Returns an (outcome-constant, difficulty) tuple.
    """
    params = data['params']
    # [worker_name, job_id, extranonce2, ntime, nonce]
    # ["slush.miner1", "bf", "00000001", "504e86ed", "b2957c02"]
    self.logger.debug(
        "Recieved work submit:\n\tworker_name: {0}\n\t"
        "job_id: {1}\n\textranonce2: {2}\n\t"
        "ntime: {3}\n\tnonce: {4} ({int_nonce})"
        .format(
            *params,
            int_nonce=struct.unpack(str("<L"), unhexlify(params[4]))))

    try:
        difficulty, jobid = self.job_mapper[data['params'][1]]
    except KeyError:
        # since we can't identify the diff we just have to assume it's
        # current diff
        self.send_error(self.STALE_SHARE)
        self.server_state['reject_stale'].incr(self.difficulty)
        return self.STALE_SHARE, self.difficulty

    # lookup the job in the global job dictionary. If it's gone from here
    # then a new block was announced which wiped it
    try:
        job = self.net_state['jobs'][jobid]
    except KeyError:
        self.send_error(self.STALE_SHARE)
        self.server_state['reject_stale'].incr(difficulty)
        return self.STALE_SHARE, difficulty

    # assemble a complete block header bytestring
    header = job.block_header(
        nonce=params[4],
        extra1=self.id,
        extra2=params[2],
        ntime=params[3])

    # Check a submitted share against previous shares to eliminate
    # duplicates
    share = (self.id, params[2], params[4], params[3])
    if share in job.acc_shares:
        self.logger.info("Duplicate share rejected from worker {}.{}!"
                         .format(self.address, self.worker))
        self.send_error(self.DUP_SHARE)
        self.server_state['reject_dup'].incr(difficulty)
        return self.DUP_SHARE, difficulty

    # reject shares that don't meet the client's assigned difficulty
    job_target = target_from_diff(difficulty, self.config['diff1'])
    hash_int = self.config['pow_func'](header)
    if hash_int >= job_target:
        self.logger.info("Low diff share rejected from worker {}.{}!"
                         .format(self.address, self.worker))
        self.send_error(self.LOW_DIFF)
        self.server_state['reject_low'].incr(difficulty)
        return self.LOW_DIFF, difficulty

    # we want to send an ack ASAP, so do it here
    self.send_success(self.msg_id)
    self.logger.debug("Valid share accepted from worker {}.{}!"
                      .format(self.address, self.worker))

    # Add the share to the accepted set to check for dups
    job.acc_shares.add(share)
    self.server_state['shares'].incr(difficulty)

    # double-SHA256 header hash (display byte order) plus the integer
    # form used to build the aux proof-of-work structure
    header_hash = sha256(sha256(header).digest()).digest()[::-1]
    header_hash_int = bitcoin_data.hash256(header)

    def check_merged_block(mm_later):
        # If the share also meets an aux chain's target, build the
        # aux-pow structure and submit it to that chain's daemon.
        aux_work, index, hashes = mm_later
        if hash_int <= aux_work['target']:
            monitor = aux_work['monitor']
            self.server_state['aux_state'][monitor.name]['solves'] += 1
            self.logger.log(36, "New {} Aux Block identified!".format(monitor.name))
            aux_block = (
                pack.IntType(256, 'big').pack(aux_work['hash']).encode('hex'),
                bitcoin_data.aux_pow_type.pack(dict(
                    merkle_tx=dict(
                        tx=bitcoin_data.tx_type.unpack(job.coinbase.raw),
                        block_hash=header_hash_int,
                        merkle_link=job.merkle_link,
                    ),
                    merkle_link=bitcoin_data.calculate_merkle_link(hashes, index),
                    parent_block_header=bitcoin_data.block_header_type.unpack(header),
                )).encode('hex'),
            )
            retries = 0
            while retries < 5:
                retries += 1
                new_height = self.server_state['aux_state'][monitor.name]['height'] + 1
                try:
                    res = aux_work['merged_proxy'].getauxblock(*aux_block)
                except (CoinRPCException, socket.error, ValueError) as e:
                    self.logger.error("{} Aux block failed to submit to the server {}!"
                                      .format(monitor.name), exc_info=True)
                    self.logger.error(getattr(e, 'error'))

                # NOTE(review): if getauxblock raised on the very first
                # attempt, `res` is unbound here — confirm intended.
                if res is True:
                    self.logger.info("NEW {} Aux BLOCK ACCEPTED!!!".format(monitor.name))
                    self.server_state['aux_state'][monitor.name]['block_solve'] = int(time())
                    self.server_state['aux_state'][monitor.name]['accepts'] += 1
                    self.server_state['aux_state'][monitor.name]['recent_blocks'].append(
                        dict(height=new_height, timestamp=int(time())))
                    if monitor.send:
                        # best-effort gathering of block details for the
                        # celery task; each lookup tolerates failure
                        self.logger.info("Submitting {} new block to celery".format(monitor.name))
                        try:
                            hsh = aux_work['merged_proxy'].getblockhash(new_height)
                        except Exception:
                            self.logger.info("", exc_info=True)
                            hsh = ''
                        try:
                            block = aux_work['merged_proxy'].getblock(hsh)
                        except Exception:
                            self.logger.info("", exc_info=True)
                        try:
                            trans = aux_work['merged_proxy'].gettransaction(block['tx'][0])
                            amount = trans['details'][0]['amount']
                        except Exception:
                            self.logger.info("", exc_info=True)
                            amount = -1
                        self.celery.send_task_pp(
                            'add_block',
                            self.address,
                            new_height,
                            int(amount * 100000000),
                            -1,
                            "%0.6X" % bitcoin_data.FloatingInteger.from_target_upper_bound(aux_work['target']).bits,
                            hsh,
                            merged=monitor.celery_id)
                    break  # break retry loop if success
                else:
                    self.logger.error(
                        "{} Aux Block failed to submit to the server, "
                        "server returned {}!".format(monitor.name, res),
                        exc_info=True)
                    sleep(1)
            else:
                # while/else: all retries exhausted without a break
                self.server_state['aux_state'][monitor.name]['rejects'] += 1

    for mm in job.mm_later:
        spawn(check_merged_block, mm)

    # valid network hash? if not, we're done — it was just a pool share
    if hash_int > job.bits_target:
        return self.VALID_SHARE, difficulty

    block = hexlify(job.submit_serial(header))

    def submit_block(conn):
        # push the serialized block to one daemon, retrying up to 5 times;
        # tries getblocktemplate submit first, falls back to submitblock
        retries = 0
        while retries < 5:
            retries += 1
            res = "failed"
            try:
                res = conn.getblocktemplate({'mode': 'submit', 'data': block})
            except (CoinRPCException, socket.error, ValueError) as e:
                self.logger.info("Block failed to submit to the server {} with submitblock!"
                                 .format(conn.name))
                if getattr(e, 'error', {}).get('code', 0) != -8:
                    self.logger.error(getattr(e, 'error'), exc_info=True)
                try:
                    res = conn.submitblock(block)
                except (CoinRPCException, socket.error, ValueError) as e:
                    self.logger.error("Block failed to submit to the server {}!"
                                      .format(conn.name), exc_info=True)
                    self.logger.error(getattr(e, 'error'))

            # a None result signals acceptance
            if res is None:
                self.net_state['work']['accepts'] += 1
                self.net_state['work']['recent_blocks'].append(
                    dict(height=job.block_height, timestamp=int(time())))
                hash_hex = hexlify(header_hash)
                self.celery.send_task_pp(
                    'add_block',
                    self.address,
                    job.block_height,
                    job.total_value,
                    job.fee_total,
                    hexlify(job.bits),
                    hash_hex)
                self.logger.info("NEW BLOCK ACCEPTED by {}!!!"
                                 .format(conn.name))
                self.server_state['block_solve'] = int(time())
                break  # break retry loop if success
            else:
                self.logger.error(
                    "Block failed to submit to the server {}, "
                    "server returned {}!".format(conn.name, res),
                    exc_info=True)
                sleep(1)
                self.logger.info("Retry {} for connection {}".format(retries, conn.name))
        else:
            # while/else: all retries exhausted without a break
            self.net_state['work']['rejects'] += 1

    for conn in self.net_state['live_connections']:
        # spawn a new greenlet for each submission to do them all async.
        # lower orphan chance
        spawn(submit_block, conn)

    # best-effort logging of the solved block; never let a logging
    # failure affect the return value
    try:
        self.logger.log(35, "Valid network block identified!")
        self.logger.info("New block at height %i"
                         % self.net_state['work']['height'])
        self.logger.info("Block coinbase hash %s" % job.coinbase.lehexhash)
        self.logger.log(35, "New block hex dump:\n{}".format(block))
        self.logger.log(35, "Coinbase: {}".format(str(job.coinbase.to_dict())))
        for trans in job.transactions:
            self.logger.log(35, str(trans.to_dict()))
    except Exception:
        # because I'm paranoid...
        self.logger.error("Unexcpected exception in block logging!",
                          exc_info=True)

    return self.BLOCK_FOUND, difficulty
def submit_job(self, data):
    """ Handles receiving a work submission and checking that it is valid:
    stale, duplicate and low-difficulty shares are rejected; accepted
    shares are acked and counted. A share meeting an aux-chain target is
    submitted to the merged-mining daemon; one meeting the main network
    target is submitted to every live daemon connection.

    data['params'] layout: [worker_name, job_id, extranonce2, ntime, nonce]
    Returns an (outcome-constant, difficulty) tuple.
    """
    params = data['params']
    # [worker_name, job_id, extranonce2, ntime, nonce]
    # ["slush.miner1", "bf", "00000001", "504e86ed", "b2957c02"]
    self.logger.debug(
        "Recieved work submit:\n\tworker_name: {0}\n\t"
        "job_id: {1}\n\textranonce2: {2}\n\t"
        "ntime: {3}\n\tnonce: {4} ({int_nonce})"
        .format(
            *params,
            int_nonce=struct.unpack(str("<L"), unhexlify(params[4]))))

    try:
        difficulty, jobid = self.job_mapper[data['params'][1]]
    except KeyError:
        # since we can't identify the diff we just have to assume it's
        # current diff
        self.send_error(self.STALE_SHARE)
        self.server_state['reject_stale'].incr(self.difficulty)
        return self.STALE_SHARE, self.difficulty

    # lookup the job in the global job dictionary. If it's gone from here
    # then a new block was announced which wiped it
    try:
        job = self.net_state['jobs'][jobid]
    except KeyError:
        self.send_error(self.STALE_SHARE)
        self.server_state['reject_stale'].incr(difficulty)
        return self.STALE_SHARE, difficulty

    # assemble a complete block header bytestring
    header = job.block_header(
        nonce=params[4],
        extra1=self.id,
        extra2=params[2],
        ntime=params[3])

    # Check a submitted share against previous shares to eliminate
    # duplicates
    share = (self.id, params[2], params[4], params[3])
    if share in job.acc_shares:
        self.logger.info("Duplicate share rejected from worker {}.{}!"
                         .format(self.address, self.worker))
        self.send_error(self.DUP_SHARE)
        self.server_state['reject_dup'].incr(difficulty)
        return self.DUP_SHARE, difficulty

    # reject shares that don't meet the client's assigned difficulty
    job_target = target_from_diff(difficulty, self.config['diff1'])
    hash_int = self.config['pow_func'](header)
    if hash_int >= job_target:
        self.logger.info("Low diff share rejected from worker {}.{}!"
                         .format(self.address, self.worker))
        self.send_error(self.LOW_DIFF)
        self.server_state['reject_low'].incr(difficulty)
        return self.LOW_DIFF, difficulty

    # we want to send an ack ASAP, so do it here
    self.send_success(self.msg_id)
    self.logger.debug("Valid share accepted from worker {}.{}!"
                      .format(self.address, self.worker))

    # Add the share to the accepted set to check for dups
    job.acc_shares.add(share)
    self.server_state['shares'].incr(difficulty)

    # double-SHA256 header hash (display byte order) plus the integer
    # form used to build the aux proof-of-work structure
    header_hash = sha256(sha256(header).digest()).digest()[::-1]
    header_hash_int = bitcoin_data.hash256(header)

    def check_merged_block(mm_later):
        # If the share also meets an aux chain's target, build the
        # aux-pow structure and submit it to that chain's daemon.
        aux_work, index, hashes = mm_later
        if hash_int <= aux_work['target']:
            monitor = aux_work['monitor']
            self.server_state['aux_state'][monitor.name]['solves'] += 1
            self.logger.log(36, "New {} Aux Block identified!".format(monitor.name))
            aux_block = (
                pack.IntType(256, 'big').pack(aux_work['hash']).encode('hex'),
                bitcoin_data.aux_pow_type.pack(dict(
                    merkle_tx=dict(
                        tx=bitcoin_data.tx_type.unpack(job.coinbase.raw),
                        block_hash=header_hash_int,
                        merkle_link=job.merkle_link,
                    ),
                    merkle_link=bitcoin_data.calculate_merkle_link(hashes, index),
                    parent_block_header=bitcoin_data.block_header_type.unpack(header),
                )).encode('hex'),
            )
            retries = 0
            while retries < 5:
                retries += 1
                new_height = self.server_state['aux_state'][monitor.name]['height'] + 1
                try:
                    res = aux_work['merged_proxy'].getauxblock(*aux_block)
                except (CoinRPCException, socket.error, ValueError) as e:
                    self.logger.error("{} Aux block failed to submit to the server {}!"
                                      .format(monitor.name), exc_info=True)
                    self.logger.error(getattr(e, 'error'))

                # NOTE(review): if getauxblock raised on the very first
                # attempt, `res` is unbound here — confirm intended.
                if res is True:
                    self.logger.info("NEW {} Aux BLOCK ACCEPTED!!!".format(monitor.name))
                    self.server_state['aux_state'][monitor.name]['block_solve'] = int(time())
                    if monitor.send:
                        # best-effort gathering of block details for the
                        # celery task; each lookup tolerates failure
                        self.logger.info("Submitting {} new block to celery".format(monitor.name))
                        try:
                            hsh = aux_work['merged_proxy'].getblockhash(new_height)
                        except Exception:
                            self.logger.info("", exc_info=True)
                            hsh = ''
                        try:
                            block = aux_work['merged_proxy'].getblock(hsh)
                        except Exception:
                            self.logger.info("", exc_info=True)
                        try:
                            trans = aux_work['merged_proxy'].gettransaction(block['tx'][0])
                            amount = trans['details'][0]['amount']
                        except Exception:
                            self.logger.info("", exc_info=True)
                            amount = -1
                        self.celery.send_task_pp(
                            'add_block',
                            self.address,
                            new_height,
                            int(amount * 100000000),
                            -1,
                            "%0.6X" % bitcoin_data.FloatingInteger.from_target_upper_bound(aux_work['target']).bits,
                            hsh,
                            merged=monitor.celery_id)
                    break  # break retry loop if success
                else:
                    self.logger.error(
                        "{} Aux Block failed to submit to the server, "
                        "server returned {}!".format(monitor.name, res),
                        exc_info=True)
                    sleep(1)

    for mm in job.mm_later:
        spawn(check_merged_block, mm)

    # valid network hash? if not, we're done — it was just a pool share
    if hash_int > job.bits_target:
        return self.VALID_SHARE, difficulty

    # best-effort logging of the solved block
    # NOTE(review): `block` is assigned inside this try — if an earlier
    # logging call raised, submit_block below would hit an undefined
    # `block`; confirm this ordering is intended.
    try:
        self.logger.log(35, "Valid network block identified!")
        self.logger.info("New block at height %i"
                         % self.net_state['current_height'])
        self.logger.info("Block coinbase hash %s" % job.coinbase.lehexhash)
        block = hexlify(job.submit_serial(header))
        self.logger.log(35, "New block hex dump:\n{}".format(block))
        self.logger.log(35, "Coinbase: {}".format(str(job.coinbase.to_dict())))
        for trans in job.transactions:
            self.logger.log(35, str(trans.to_dict()))
    except Exception:
        # because I'm paranoid...
        self.logger.error("Unexcpected exception in block logging!",
                          exc_info=True)

    def submit_block(conn):
        # push the serialized block to one daemon, retrying up to 5 times;
        # tries getblocktemplate submit first, falls back to submitblock
        retries = 0
        while retries < 5:
            retries += 1
            res = "failed"
            try:
                res = conn.getblocktemplate({'mode': 'submit', 'data': block})
            except (CoinRPCException, socket.error, ValueError) as e:
                self.logger.info("Block failed to submit to the server {} with submitblock!"
                                 .format(conn.name))
                if getattr(e, 'error', {}).get('code', 0) != -8:
                    self.logger.error(getattr(e, 'error'), exc_info=True)
                try:
                    res = conn.submitblock(block)
                except (CoinRPCException, socket.error, ValueError) as e:
                    self.logger.error("Block failed to submit to the server {}!"
                                      .format(conn.name), exc_info=True)
                    self.logger.error(getattr(e, 'error'))

            # a None result signals acceptance
            if res is None:
                hash_hex = hexlify(header_hash)
                self.celery.send_task_pp(
                    'add_block',
                    self.address,
                    self.net_state['current_height'] + 1,
                    job.total_value,
                    job.fee_total,
                    hexlify(job.bits),
                    hash_hex)
                self.logger.info("NEW BLOCK ACCEPTED by {}!!!"
                                 .format(conn.name))
                self.server_state['block_solve'] = int(time())
                break  # break retry loop if success
            else:
                self.logger.error(
                    "Block failed to submit to the server {}, "
                    "server returned {}!".format(conn.name, res),
                    exc_info=True)
                sleep(1)
                self.logger.info("Retry {} for connection {}".format(retries, conn.name))

    for conn in self.net_state['live_connections']:
        # spawn a new greenlet for each submission to do them all async.
        # lower orphan chance
        spawn(submit_block, conn)

    return self.BLOCK_FOUND, difficulty