Example #1
import numpy as np

from lsd import colgroup, pool2  # helper modules from the LSD package (assumed import path)


def store_smf_list(db, exp_tabname, new_exps):
    """ Store a human-readable list of loaded SMF files and their exp_ids,
        for fast lookup by ps1-load and lsd-make-object-catalog.

        Note: For forward compatibility, readers of this file should assume it has an
        unspecified number of columns. I.e., don't do (a, b) = line.split(), but
        do (a, b) = line.split()[:2].
    """
    uri = 'lsd:%s:cache:all_exposures.txt' % (exp_tabname)

    # Try to load from cache, query otherwise
    try:
        with db.open_uri(uri) as f:
            old_exps = np.loadtxt(f,
                                  dtype=[('_EXP', 'u8'), ('smf_fn', 'a40')],
                                  ndmin=1)
    except IOError:
        old_exps = db.query("select _EXP, smf_fn from %s" % exp_tabname).fetch(
            progress_callback=pool2.progress_pass)

    # Merge the cached list with the newly loaded exposures; if nothing
    # was loaded before, the new exposures are the whole list
    exps = colgroup.fromiter([old_exps, new_exps],
                             blocks=True) if len(old_exps) else new_exps

    # Store to cache
    with db.open_uri(uri, mode='w') as f:
        for exp_id, exp_fn in exps:
            f.write("%d %s\n" % (exp_id, exp_fn))
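
The docstring's forward-compatibility note is worth a concrete sketch. Below is a minimal compliant reader for the cached all_exposures.txt file; the function name read_smf_list is hypothetical, and only the two leading columns written above are assumed:

def read_smf_list(f):
    """ Yield (exp_id, smf_fn) pairs from an all_exposures.txt stream.

        Hypothetical reader illustrating the docstring's advice: take only
        the first two columns, so files that later grow extra columns
        still parse.
    """
    for line in f:
        if not line.strip():
            continue
        exp_id, smf_fn = line.split()[:2]
        yield int(exp_id), smf_fn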
Example #2
import numpy as np

from lsd import colgroup  # helper module from the LSD package (assumed import path)


def _sanity_check_object_table_reducer(kv, db, det_tabname, explist):
    cell_id, rowlist = kv
    rows = colgroup.fromiter(rowlist, blocks=True)
    rows.sort(["det_id"])

    # Verify that each detection appears only once (cond #1).
    # After sorting, a duplicate det_id shows up as a zero difference
    # between neighboring entries.
    if not np.all(np.diff(rows.det_id) != 0):
        # Dump the whole cell for offline inspection
        with open("cell.%s.txt" % cell_id, 'w') as fp:
            for row in rows:
                fp.write("%s\t%s\n" % (row['det_id'], row['obj_id']))
        print("ERROR -- same detection assigned to multiple objects")
        x = rows.det_id
        a1 = np.flatnonzero(np.diff(x) == 0)
        print(rows[a1])
        print(rows[a1 + 1])

    # Verify that all detections in this cell are linked (cond #2).
    # The check can be restricted to detections from exposures present in explist.
    det_rows = db.query("_ID as det_id, _EXP as exp_id FROM '%s'"
                        % det_tabname).fetch_cell(cell_id)
    if explist is not None:
        det_rows = det_rows[np.in1d(det_rows.exp_id, explist)]
    det_rows.sort(["det_id"])

    # array_equal also guards against the two ID lists differing in length
    ok = np.array_equal(np.unique(rows.det_id), det_rows.det_id)
    if not ok:
        print("ERROR -- Not all detections were linked to objects (need to rerun make-object-catalog?): nlinked=%d ntotal=%d cell_id=%s"
              % (len(np.unique(rows.det_id)), len(det_rows), cell_id))

    yield ok
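
The reducer above leans on two generic NumPy idioms: np.diff over a sorted ID column exposes duplicates as zero differences, and comparing the unique linked IDs against the full detection list verifies coverage. A self-contained sketch of both checks with plain arrays (all values hypothetical):

import numpy as np

# Sorted detection IDs as linked to objects; 7 appears twice (cond #1 violation)
det_id = np.array([3, 5, 7, 7, 9], dtype=np.uint64)

# Duplicates sit where the difference between sorted neighbors is zero
dup_idx = np.flatnonzero(np.diff(det_id) == 0)
print(det_id[dup_idx])    # [7]

# Every detection known to the cell must appear among the links (cond #2)
all_dets = np.array([3, 5, 7, 9, 11], dtype=np.uint64)
print(np.array_equal(np.unique(det_id), all_dets))    # False: 11 was never linked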