def compute_counts(db, tabname, force=False):
    if not force:
        # Use the saved count from previous snapshot, and
        # just add the rows added by this one
        assert db.in_transaction()

        # Load the count that was valid before this transaction
        db2 = DB(':'.join(db.path))
        try:
            nrows = db2.table(tabname).nrows()
            new = False
        except IOError:
            # This is a new table
            nrows = 0
            new = True

        # Fetch all cells modified by this transaction
        cells = db.table(tabname).get_cells_in_snapshot(db.snapid)

        if not new:
            # Count up rows before modification
            nrows_old = compute_counts_aux(db2, tabname, cells,
                                           progress_callback=pool2.progress_pct_nnl)
        else:
            nrows_old = 0

        # Count up rows in modified cells
        nrows_new = compute_counts_aux(db, tabname, cells)

        # The new row count
        nrows += nrows_new - nrows_old
    else:
        # Directly count everything
        cells = db.table(tabname).get_cells()
        nrows = compute_counts_aux(db, tabname, cells)

    return nrows
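# Example (hypothetical, kept commented out so importing this module has no
# side effects): a full recount of a table, bypassing the incremental path.
# With force=True no open transaction is required; 'obj' is just the example
# table queried in the __main__ block below.
#
#   db = DB('db2')
#   nrows = compute_counts(db, 'obj', force=True)
#   print "obj table has %d rows" % nrows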
        vals[i] = np.array(v, dtype=vdtype)
        # vals[i] = v

    # Return it as a ColGroup(), keyed to this static cell_id
    yield static_cell, ColGroup([(key, keys), (oval, vals)])

def _accumulate_and_write(qresult, qwriter, key, val, oval):
    for static_cell, rows in _accumulator(qresult, key, val, oval):
        result = qwriter.write(static_cell, rows)
        yield result
        # yield 0, [1, 2]

if __name__ == '__main__':
    from join_ops import DB

    ntot = 0
    db = DB('db2')
    writer = IntoWriter(db, "magbase WHERE obj_id |= obj_id")
    for band in 'grizy':
    # for band in 'g':
        nband = 0
        q = db.query("obj_id, ap_mag, filterid FROM obj, det WHERE filterid == '%s.0000'" % band)
        for static_cell, rows in q.execute([(_accumulate_and_write, writer, 'obj_id', 'ap_mag', band)],
                                           group_by_static_cell=True):
            nband += len(rows)
        ## q = db.query("obj_id, ap_mag, filterid FROM obj, det WHERE filterid == 'y.0000' INTO magbase")
        ## for static_cell, rows in q.execute([(_accumulator, 'obj_id', 'ap_mag', 'ap_mag')], group_by_static_cell=True):
        ##     nband += len(rows)
        ntot += nband
        print "%s objects in band %s" % (nband, band)

    db.compute_summary_stats('magbase')
    print "%s insertions for %s objects." % (ntot, db.table('magbase').nrows())

    # for static_cell, rows in q.execute([(_accumulator, 'obj_id', 'ap_mag', 'ap_mag')], group_by_static_cell=True):
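    # Hypothetical follow-up check (sketch, commented out): re-open the database
    # in a fresh session and confirm the 'magbase' row count, using only calls
    # already used above (DB, .table(), .nrows()).
    #
    #   db = DB('db2')
    #   print "magbase now holds %d rows" % db.table('magbase').nrows()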