def queue_blocks(self):
    # Queue ids of unspent outputs (spent is NULL or FALSE) in
    # BLOCK-sized batches.
    if self.total_to_process == 0:
        print('Nothing to queue')
        return
    txout_ids = self.db_session.query(db.TxOut.id).filter(
        # db.TxOut.id >= self.min_id
        db.TxOut.spent.is_(None) | db.TxOut.spent.is_(False)
    ).yield_per(BLOCK).enable_eagerloads(False)
    for chunk in chunked(txout_ids, BLOCK):
        self.queue.put([txout_id for (txout_id,) in chunk])
        self.queued_blocks += 1
        if self.queued_blocks % 1000 == 0:
            print '%r queued' % (self.queued_blocks,)
def queue_blocks(self):
    # Queue ids of inputs not yet linked to an output, skipping coinbase
    # inputs (whose previous output hash is 32 zero bytes).
    if self.total_to_process == 0:
        print('Nothing to queue')
        return
    txin_ids = self.db_session.query(db.TxIn.id).filter(
        db.TxIn.txout_id.is_(None) &
        (db.TxIn.previous_output_transaction_hash != 32 * '\x00')
    ).yield_per(BLOCK).enable_eagerloads(False)
    for chunk in chunked(txin_ids, BLOCK):
        self.queue.put([txin_id for (txin_id,) in chunk])
        self.queued_blocks += 1
        if self.queued_blocks % 1000 == 0:
            print '%r queued' % (self.queued_blocks,)
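# Both queue_blocks variants rely on a chunked() helper and a BLOCK batch
# size that are defined elsewhere in the codebase. The sketch below is only
# a plausible stand-in, assuming the helper just batches an arbitrary
# iterable into lists; the BLOCK value here is an assumption, not the
# project's actual constant.
import itertools

BLOCK = 10000  # assumed batch size; the real constant lives elsewhere


def chunked(iterable, chunk_size):
    """Yield successive lists of at most chunk_size items from iterable."""
    iterator = iter(iterable)
    while True:
        chunk = list(itertools.islice(iterator, chunk_size))
        if not chunk:
            break
        yield chunk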
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'txin_txout',
        sa.Column('txin_id', sa.Integer(), nullable=False),
        sa.Column('txout_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['txin_id'], ['txin.id']),
        sa.ForeignKeyConstraint(['txout_id'], ['txout.id']),
        sa.PrimaryKeyConstraint('txin_id', 'txout_id'))

    # Equivalent to:
    # INSERT INTO txin_txout (txin_id, txout_id)
    #     SELECT id, txout_id FROM txin WHERE txout_id IS NOT NULL
    # but done in CHUNK_SIZE batches with progress reporting.
    conn = op.get_bind()
    session = db.Session(bind=conn)
    num_to_insert = session.query(sa.func.count(db.TxIn.id)).filter(
        db.TxIn.txout_id.isnot(None)).scalar()
    print('Going to insert %u records into txin_txout' % (num_to_insert,))
    start_time = datetime.now()
    num_inserted = 0
    for chunk in chunked(
            session.query(db.TxIn.id, db.TxIn.txout_id).filter(
                db.TxIn.txout_id.isnot(None)).yield_per(CHUNK_SIZE),
            chunk_size=CHUNK_SIZE):
        query_start = datetime.now()
        # Multi-row insert of (txin_id, txout_id) pairs.
        conn.execute(db.TxIn_TxOut.__table__.insert().values(chunk))
        query_end = datetime.now()
        num_inserted += len(chunk)
        tot_time = query_end - start_time
        avg_time = tot_time / num_inserted
        print('%u / %u %.3f%% done, %u inserted, %s for query, %s total, '
              '%s avg, ~%s remaining' % (
                  num_inserted, num_to_insert,
                  num_inserted * 100.0 / num_to_insert, len(chunk),
                  query_end - query_start, tot_time, avg_time,
                  avg_time * (num_to_insert - num_inserted)))

    # Create indexes after inserting data
    op.create_index(op.f('ix_txin_txout_txin_id'), 'txin_txout',
                    ['txin_id'], unique=False)
    op.create_index('ix_txin_txout_txin_id_txout_id', 'txin_txout',
                    ['txin_id', 'txout_id'], unique=True)
    op.create_index(op.f('ix_txin_txout_txout_id'), 'txin_txout',
                    ['txout_id'], unique=False)
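# Alembic migrations conventionally pair upgrade() with a downgrade(). The
# source does not show one, so this is only a sketch of the usual reversal,
# assuming dropping txin_txout outright is acceptable:
def downgrade():
    # Drop the indexes first, then the link table itself.
    op.drop_index(op.f('ix_txin_txout_txout_id'), table_name='txin_txout')
    op.drop_index('ix_txin_txout_txin_id_txout_id', table_name='txin_txout')
    op.drop_index(op.f('ix_txin_txout_txin_id'), table_name='txin_txout')
    op.drop_table('txin_txout')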