def rpc_getblocktemplate(self, param):
    if param['mode'] == 'template':
        pass
    elif param['mode'] == 'submit':
        result = param['data']
        block = gvidon_data.block_type.unpack(result.decode('hex'))
        if sum(tx_out['value'] for tx_out in block['txs'][0]['tx_outs']) != sum(tx['tx_outs'][0]['value'] for tx in block['txs'][1:]) + 5000000000:
            print 'invalid fee'
        if block['header']['previous_block'] != self.blocks[-1]:
            return False
        # proof of work is defined over the packed 80-byte header, not the
        # whole block serialization
        header_hash = gvidon_data.hash256(gvidon_data.block_header_type.pack(block['header']))
        if header_hash > block['header']['bits'].target:
            return False
        self.blocks.append(header_hash)
        self.headers[header_hash] = block['header']
        reactor.callLater(0, self.new_block.happened)
        return True
    else:
        raise jsonrpc.Error_for_code(-1)('invalid request')
    
    txs = []
    for i in xrange(100):
        fee = i
        txs.append(dict(
            data=gvidon_data.tx_type.pack(dict(version=1, tx_ins=[], tx_outs=[dict(value=fee, script='hello!'*100)], lock_time=0)).encode('hex'),
            fee=fee,
        ))
    return {
        "version" : 3,
        "previousblockhash" : '%064x' % (self.blocks[-1],),
        "transactions" : txs,
        "coinbaseaux" : {
            "flags" : "062f503253482f"
        },
        "coinbasevalue" : 5000000000 + sum(tx['fee'] for tx in txs),
        "target" : "00000000000044b9f20000000000000000000000000000000000000000000000",
        "mintime" : 1351655621,
        "mutable" : [
            "time",
            "transactions",
            "prevblock"
        ],
        "noncerange" : "00000000ffffffff",
        "sigoplimit" : 20000,
        "sizelimit" : 1000000,
        "curtime" : 1351659940,
        "bits" : "21008000",
        "height" : len(self.blocks),
    }
def handle_shares(self, shares, peer):
    if len(shares) > 5:
        print 'Processing %i shares from %s...' % (len(shares), '%s:%i' % peer.addr if peer is not None else None)
    
    new_count = 0
    all_new_txs = {}
    for share, new_txs in shares:
        if new_txs is not None:
            all_new_txs.update((gvidon_data.hash256(gvidon_data.tx_type.pack(new_tx)), new_tx) for new_tx in new_txs)
        
        if share.hash in self.node.tracker.items:
            #print 'Got duplicate share, ignoring. Hash: %s' % (p2pool_data.format_hash(share.hash),)
            continue
        
        new_count += 1
        
        #print 'Received share %s from %r' % (p2pool_data.format_hash(share.hash), share.peer_addr)
        
        self.node.tracker.add(share)
    
    self.node.known_txs_var.add(all_new_txs)
    
    if new_count:
        self.node.set_best_share()
    
    if len(shares) > 5:
        print '... done processing %i shares. New: %i Have: %i/~%i' % (len(shares), new_count, len(self.node.tracker.items), 2*self.node.net.CHAIN_LENGTH)
def test_tx_hash(self):
    assert data.hash256(data.tx_type.pack(dict(
        version=1,
        tx_ins=[dict(
            previous_output=None,
            sequence=None,
            script='70736a0468860e1a0452389500522cfabe6d6d2b2f33cf8f6291b184f1b291d24d82229463fcec239afea0ee34b4bfc622f62401000000000000004d696e656420627920425443204775696c6420ac1eeeed88'.decode('hex'),
        )],
        tx_outs=[dict(
            value=5003880250,
            script=data.pubkey_hash_to_script2(pack.IntType(160).unpack('ca975b00a8c203b8692f5a18d92dc5c2d2ebc57b'.decode('hex'))),
        )],
        lock_time=0,
    ))) == 0xb53802b2333e828d6532059f46ecf6b313a42d79f97925e457fbbfda45367e5c
def submit_block_p2p(block, factory, net):
    if factory.conn.value is None:
        print >>sys.stderr, 'No gvidond connection when block submittal attempted! %s%064x' % (net.PARENT.BLOCK_EXPLORER_URL_PREFIX, gvidon_data.hash256(gvidon_data.block_header_type.pack(block['header'])))
        raise deferral.RetrySilentlyException()
    factory.conn.value.send_block(block=block)
def test_hashlink3(self):
    for i in xrange(100):
        d = random_bytes(random.randrange(2048))
        d2 = random_bytes(random.randrange(200))
        d3 = random_bytes(random.randrange(2048))
        x = data.prefix_to_hash_link(d + d2, d2)
        assert data.check_hash_link(x, d3, d2) == gvidon_data.hash256(d + d2 + d3)
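# A naive model of the hash-link contract exercised by the test above, for
# illustration only (hypothetical helpers, not this project's API). The real
# prefix_to_hash_link keeps just a fixed-size SHA-256 midstate plus the
# constant tail instead of the whole prefix, which is the point of the
# technique. Assumes hash256 is double SHA-256 read as a little-endian
# integer.
import hashlib

def _hash256_sketch(s):
    return int(hashlib.sha256(hashlib.sha256(s).digest()).digest()[::-1].encode('hex'), 16)

def naive_prefix_to_hash_link(prefix, const_ending=''):
    assert prefix.endswith(const_ending)
    return dict(prefix=prefix, const_ending=const_ending)  # real version keeps only the midstate

def naive_check_hash_link(link, data, const_ending=''):
    assert link['const_ending'] == const_ending
    return _hash256_sketch(link['prefix'] + data)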
def check(self, tracker):
    from p2pool import p2p
    if self.share_data['previous_share_hash'] is not None:
        previous_share = tracker.items[self.share_data['previous_share_hash']]
        if type(self) is type(previous_share):
            pass
        elif type(self) is type(previous_share).SUCCESSOR:
            if tracker.get_height(previous_share.hash) < self.net.CHAIN_LENGTH:
                raise p2p.PeerMisbehavingError('switch without enough history')
            
            # switch only valid if 85% of hashes in [self.net.CHAIN_LENGTH*9//10, self.net.CHAIN_LENGTH] for new version
            counts = get_desired_version_counts(tracker,
                tracker.get_nth_parent_hash(previous_share.hash, self.net.CHAIN_LENGTH*9//10), self.net.CHAIN_LENGTH//10)
            if counts.get(self.VERSION, 0) < sum(counts.itervalues())*85//100:
                raise p2p.PeerMisbehavingError('switch without enough hash power upgraded')
        else:
            raise p2p.PeerMisbehavingError('''%s can't follow %s''' % (type(self).__name__, type(previous_share).__name__))
    
    other_tx_hashes = [tracker.items[tracker.get_nth_parent_hash(self.hash, share_count)].share_info['new_transaction_hashes'][tx_count]
        for share_count, tx_count in self.iter_transaction_hash_refs()]
    
    share_info, gentx, other_tx_hashes2, get_share = self.generate_transaction(tracker, self.share_info['share_data'], self.header['bits'].target, self.share_info['timestamp'], self.share_info['bits'].target, self.contents['ref_merkle_link'], [(h, None) for h in other_tx_hashes], self.net, last_txout_nonce=self.contents['last_txout_nonce'])
    
    assert other_tx_hashes2 == other_tx_hashes
    if share_info != self.share_info:
        raise ValueError('share_info invalid')
    if gvidon_data.hash256(gvidon_data.tx_type.pack(gentx)) != self.gentx_hash:
        raise ValueError('''gentx doesn't match hash_link''')
    if gvidon_data.calculate_merkle_link([None] + other_tx_hashes, 0) != self.merkle_link:
        raise ValueError('merkle_link and other_tx_hashes do not match')
    
    return gentx # only used by as_block
@classmethod
def get_ref_hash(cls, net, share_info, ref_merkle_link):
    return pack.IntType(256).pack(gvidon_data.check_merkle_link(gvidon_data.hash256(cls.ref_type.pack(dict(
        identifier=net.IDENTIFIER,
        share_info=share_info,
    ))), ref_merkle_link))
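# A minimal sketch of what check_merkle_link above is assumed to compute
# (hypothetical standalone version, not this project's implementation):
# with index 0 the tip hash is the leftmost leaf, so it is folded up the
# branch as the left child at every level, each step double-SHA-256 hashing
# the two 32-byte little-endian child hashes.
import hashlib

def _le256_sketch(n):
    return ('%064x' % n).decode('hex')[::-1]  # 32 bytes, little-endian

def merkle_fold_sketch(tip_hash, branch):
    h = tip_hash
    for sibling in branch:
        digest = hashlib.sha256(hashlib.sha256(_le256_sketch(h) + _le256_sketch(sibling)).digest()).digest()
        h = int(digest[::-1].encode('hex'), 16)
    return h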
@defer.inlineCallbacks
def test_get_block(self):
    factory = p2p.ClientFactory(networks.nets['gvidon'])
    c = reactor.connectTCP('127.0.0.1', 9900, factory)
    try:
        h = 0x00000000000132b9afeca5e9a2fdf4477338df6dcff1342300240bc70397c4bb
        block = yield deferral.retry()(defer.inlineCallbacks(lambda: defer.returnValue((yield (yield factory.getProtocol()).get_block(h)))))()
        assert data.merkle_hash(map(data.hash256, map(data.tx_type.pack, block['txs']))) == block['header']['merkle_root']
        assert data.hash256(data.block_header_type.pack(block['header'])) == h
    finally:
        factory.stopTrying()
        c.disconnect()
def test_header_hash(self):
    assert data.hash256(data.block_header_type.pack(dict(
        version=1,
        previous_block=0x000000000000038a2a86b72387f93c51298298a732079b3b686df3603d2f6282,
        merkle_root=0x37a43a3b812e4eb665975f46393b4360008824aab180f27d642de8c28073bc44,
        timestamp=1323752685,
        bits=data.FloatingInteger(437159528),
        nonce=3658685446,
    ))) == 0x000000000000003aaaf7638f9f9c0d0c60e8b0eb817dcdb55fd2b1964efc5175
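# The constant checked above can be cross-checked without the pack
# machinery, assuming the canonical 80-byte header layout (all fields
# little-endian) and hash256 = double SHA-256 read as a little-endian
# integer. A sketch for verification, not this project's serializer;
# bits is passed as the raw compact value (437159528 here):
import hashlib, struct

def header_hash_sketch(version, previous_block, merkle_root, timestamp, bits, nonce):
    header = (struct.pack('<I', version)
        + ('%064x' % previous_block).decode('hex')[::-1]
        + ('%064x' % merkle_root).decode('hex')[::-1]
        + struct.pack('<III', timestamp, bits, nonce))
    assert len(header) == 80
    return int(hashlib.sha256(hashlib.sha256(header).digest()).digest()[::-1].encode('hex'), 16)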
def handle_remember_tx(self, tx_hashes, txs):
    # first pass: hashes of transactions the peer expects us to already know
    for tx_hash in tx_hashes:
        if tx_hash in self.remembered_txs:
            print >>sys.stderr, 'Peer referenced transaction twice, disconnecting'
            self.disconnect()
            return
        
        if tx_hash in self.node.known_txs_var.value:
            tx = self.node.known_txs_var.value[tx_hash]
        else:
            for cache in self.known_txs_cache.itervalues():
                if tx_hash in cache:
                    tx = cache[tx_hash]
                    print 'Transaction %064x rescued from peer latency cache!' % (tx_hash,)
                    break
            else:
                print >>sys.stderr, 'Peer referenced unknown transaction %064x, disconnecting' % (tx_hash,)
                self.disconnect()
                return
        
        self.remembered_txs[tx_hash] = tx
        self.remembered_txs_size += 100 + gvidon_data.tx_type.packed_size(tx)
    
    # second pass: full transactions sent along with the message
    added_known_txs = {}
    warned = False
    for tx in txs:
        tx_hash = gvidon_data.hash256(gvidon_data.tx_type.pack(tx))
        if tx_hash in self.remembered_txs:
            print >>sys.stderr, 'Peer referenced transaction twice, disconnecting'
            self.disconnect()
            return
        
        if tx_hash in self.node.known_txs_var.value and not warned:
            print 'Peer sent entire transaction %064x that was already received' % (tx_hash,)
            warned = True
        
        self.remembered_txs[tx_hash] = tx
        self.remembered_txs_size += 100 + gvidon_data.tx_type.packed_size(tx)
        added_known_txs[tx_hash] = tx
    self.node.known_txs_var.add(added_known_txs)
    
    if self.remembered_txs_size >= self.max_remembered_txs_size:
        raise PeerMisbehavingError('too much transaction data stored')
def rpc_submit(self, worker_name, job_id, extranonce2, ntime, nonce):
    if job_id not in self.handler_map:
        print >>sys.stderr, '''Couldn't link returned work's job id with its handler. This should only happen if this process was recently restarted!'''
        return False
    x, got_response = self.handler_map[job_id]
    coinb_nonce = extranonce2.decode('hex')
    assert len(coinb_nonce) == self.wb.COINBASE_NONCE_LENGTH
    new_packed_gentx = x['coinb1'] + coinb_nonce + x['coinb2']
    header = dict(
        version=x['version'],
        previous_block=x['previous_block'],
        merkle_root=gvidon_data.check_merkle_link(gvidon_data.hash256(new_packed_gentx), x['merkle_link']),
        timestamp=pack.IntType(32).unpack(getwork._swap4(ntime.decode('hex'))),
        bits=x['bits'],
        nonce=pack.IntType(32).unpack(getwork._swap4(nonce.decode('hex'))),
    )
    return got_response(header, worker_name, coinb_nonce)
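# getwork._swap4, as used above, is assumed to undo the word-wise byte order
# of getwork-style hex fields: ntime and nonce arrive as big-endian 32-bit
# words while IntType(32) unpacks little-endian, so every 4-byte group gets
# reversed. A standalone sketch of that assumption:
def swap4_sketch(s):
    if len(s) % 4:
        raise ValueError('length must be a multiple of 4')
    return ''.join(s[i:i+4][::-1] for i in xrange(0, len(s), 4))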
@defer.inlineCallbacks
def got_conn(self, conn):
    p2p.Node.got_conn(self, conn)
    
    yield deferral.sleep(.5)
    
    new_mining_txs = dict(self.mining_txs_var.value)
    for i in xrange(3):
        huge_tx = dict(
            version=0,
            tx_ins=[],
            tx_outs=[dict(
                value=0,
                script='x'*900000,
            )],
            lock_time=i,
        )
        new_mining_txs[gvidon_data.hash256(gvidon_data.tx_type.pack(huge_tx))] = huge_tx
    self.mining_txs_var.set(new_mining_txs)
    
    self.sent_time = reactor.seconds()
@defer.inlineCallbacks
def _getwork(self, request, data, long_poll):
    request.setHeader('X-Long-Polling', '/long-polling')
    request.setHeader('X-Roll-NTime', 'expire=100')
    request.setHeader('X-Is-P2Pool', 'true')
    if request.getHeader('Host') is not None:
        request.setHeader('X-Stratum', 'stratum+tcp://' + request.getHeader('Host'))
    
    if data is not None:
        header = getwork.decode_data(data)
        if header['merkle_root'] not in self.merkle_root_to_handler:
            print >>sys.stderr, '''Couldn't link returned work's merkle root with its handler. This should only happen if this process was recently restarted!'''
            defer.returnValue(False)
        defer.returnValue(self.merkle_root_to_handler[header['merkle_root']](header, request.getUser() if request.getUser() is not None else '', '\0'*self.worker_bridge.COINBASE_NONCE_LENGTH))
    
    if p2pool.DEBUG:
        id = random.randrange(1000, 10000)
        print 'POLL %i START is_long_poll=%r user_agent=%r user=%r' % (id, long_poll, request.getHeader('User-Agent'), request.getUser())
    
    if long_poll:
        request_id = request.getClientIP(), request.getHeader('Authorization')
        if self.worker_views.get(request_id, self.worker_bridge.new_work_event.times) != self.worker_bridge.new_work_event.times:
            if p2pool.DEBUG:
                print 'POLL %i PUSH' % (id,)
        else:
            if p2pool.DEBUG:
                print 'POLL %i WAITING' % (id,)
            yield self.worker_bridge.new_work_event.get_deferred()
        self.worker_views[request_id] = self.worker_bridge.new_work_event.times
    
    x, handler = self.worker_bridge.get_work(*self.worker_bridge.preprocess_request(request.getUser() if request.getUser() is not None else ''))
    res = getwork.BlockAttempt(
        version=x['version'],
        previous_block=x['previous_block'],
        merkle_root=gvidon_data.check_merkle_link(gvidon_data.hash256(x['coinb1'] + '\0'*self.worker_bridge.COINBASE_NONCE_LENGTH + x['coinb2']), x['merkle_link']),
        timestamp=x['timestamp'],
        bits=x['bits'],
        share_target=x['share_target'],
    )
    assert res.merkle_root not in self.merkle_root_to_handler
    self.merkle_root_to_handler[res.merkle_root] = handler
    
    if p2pool.DEBUG:
        print 'POLL %i END identifier=%i' % (id, self.worker_bridge.new_work_event.times)
    
    if request.getHeader('User-Agent') == 'Jephis PIC Miner':
        # ASICMINER BE Blades apparently have a buffer overflow bug and
        # can't handle much extra in the getwork response
        extra_params = {}
    else:
        extra_params = dict(identifier=str(self.worker_bridge.new_work_event.times), submitold=True)
    defer.returnValue(res.getwork(**extra_params))
@classmethod
def from_header(cls, header):
    return cls(gvidon_data.hash256(gvidon_data.block_header_type.pack(header)), header['previous_block'])
def _(tx):
    self.known_txs_var.add({
        gvidon_data.hash256(gvidon_data.tx_type.pack(tx)): tx,
    })
def test_hashlink1(self):
    for i in xrange(100):
        d = random_bytes(random.randrange(2048))
        x = data.prefix_to_hash_link(d)
        assert data.check_hash_link(x, '') == gvidon_data.hash256(d)