Example #1
def import_from_sweeps(db, sdss_tabname, sweep_files, create=False):
	""" Import an SDSS catalog from a collection of SDSS sweep files.

	    Note: Assumes underlying shared storage for all output table
	          cells (i.e., any worker is able to write to any cell).
	"""
	with locking.lock(db.path[0] + "/.__smf-import-lock.lock"):
		if not db.table_exists(sdss_tabname) and create:
			# Create the new table
			sdss_table = db.create_table(sdss_tabname, sdss_table_def)
		else:
			sdss_table = db.table(sdss_tabname)

	t0 = time.time()
	at = 0; ntot = 0
	pool = pool2.Pool()
	for (file, nloaded) in pool.imap_unordered(sweep_files, import_from_sweeps_aux, (db, sdss_tabname), progress_callback=pool2.progress_pass):
		at = at + 1
		ntot = ntot + nloaded
		t1 = time.time()
		time_pass = (t1 - t0) / 60
		time_tot = time_pass / at * len(sweep_files)
		sfile = "..." + file[-67:] if len(file) > 70 else file
		print('  ===> Imported %-70s [%d/%d, %5.2f%%] +%-6d %9d (%.0f/%.0f min.)' % (sfile, at, len(sweep_files), 100 * float(at) / len(sweep_files), nloaded, ntot, time_pass, time_tot))
	del pool
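
This excerpt relies on its module's imports (time, plus lsd's locking and pool2 helpers) and on sdss_table_def and the import_from_sweeps_aux worker defined elsewhere in the same module. A hypothetical invocation sketch follows; the DB import path, database directory, and file glob below are assumptions for illustration, not taken from the example:

# Hypothetical invocation sketch -- the DB class, paths, and glob pattern
# are assumptions, not part of the example above.
import glob
from lsd import DB

db = DB('mydb')                                          # an LSD database directory
sweeps = sorted(glob.glob('sweeps/calibObj-*.fits.gz'))  # SDSS sweep files to ingest
import_from_sweeps(db, 'sdss', sweeps, create=True)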
Example #2
def test_locking():
    lock_path = '/tmp/test-lock-%s' % datetime.now().strftime('%Y%m%dT%H%M%S')
    text = 'we are locked. go away.'
    # before locking
    assert locking.locked(lock_path) == False
    assert locking.unlock(lock_path, text) == 'not locked'
    # locking
    assert locking.lock(lock_path, text) == 'ok'
    # locked
    assert locking.locked(lock_path) == text
    assert locking.lock(lock_path, text) == 'locked'
    assert locking.unlock(lock_path, 'not the right text') == 'miss'
    # unlocking
    assert locking.unlock(lock_path, text) == 'ok'
    # unlocked
    assert locking.locked(lock_path) == False
    assert locking.unlock(lock_path, text) == 'not locked'
    assert not os.path.exists(lock_path)
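
This test pins down the module's contract: locked() returns the stored text (or False), lock() returns 'ok' or 'locked', and unlock() returns 'ok', 'miss', or 'not locked' and deletes the lock file on success. Note this is a different interface from the context-manager locking.lock(path) used in Examples #1 and #4 through #6. A minimal file-based sketch that satisfies exactly these assertions (hypothetical, not the project's actual implementation):

# Minimal file-based sketch of the contract exercised by the test above;
# hypothetical, not the project's actual locking module.
import os

def locked(path):
    """Return the stored lock text if `path` is locked, else False."""
    if not os.path.exists(path):
        return False
    with open(path) as f:
        return f.read()

def lock(path, text):
    """Acquire the lock, storing `text`; return 'ok' or 'locked'."""
    if os.path.exists(path):
        return 'locked'
    with open(path, 'w') as f:
        f.write(text)
    return 'ok'

def unlock(path, text):
    """Release the lock only if `text` matches what was stored."""
    if not os.path.exists(path):
        return 'not locked'
    if locked(path) != text:
        return 'miss'
    os.remove(path)
    return 'ok'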
Example #3
def test_locking():
    lock_path = '/tmp/test-lock-%s' % datetime.now(config.TZ).strftime('%Y%m%dT%H%M%S')
    text = 'we are locked. go away.'
    # before locking
    assert locking.locked(lock_path) == False
    assert locking.unlock(lock_path, text) == 'not locked'
    # locking
    assert locking.lock(lock_path, text) == 'ok'
    # locked
    assert locking.locked(lock_path) == text
    assert locking.lock(lock_path, text) == 'locked'
    assert locking.unlock(lock_path, 'not the right text') == 'miss'
    # unlocking
    assert locking.unlock(lock_path, text) == 'ok'
    # unlocked
    assert locking.locked(lock_path) == False
    assert locking.unlock(lock_path, text) == 'not locked'
    assert not os.path.exists(lock_path)
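
This example is identical to Example #2 except that the lock-file name is built from a timezone-aware timestamp, datetime.now(config.TZ), rather than naive local time, presumably so the generated path follows the project-wide timezone configured in config.TZ.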
Example #4
File: smf.py Project: banados/lsd
def import_from_smf(db, det_tabname, exp_tabname, smf_files, survey, create=False):
	""" Import a PS1 table from DVO

	    Note: Assumes underlying shared storage for all table
	          cells (i.e., any worker is able to write to any cell).
	"""
	with locking.lock(db.path[0] + "/.__smf-import-lock.lock"):
		if not db.table_exists(det_tabname) and create:
			# Set up commit hooks
			exp_table_def['commit_hooks'] = [ ('Updating neighbors', 1, 'lsd.smf', 'make_image_cache', [det_tabname]) ]

			# Create new tables
			det_table  = db.create_table(det_tabname, det_table_def)
			exp_table  = db.create_table(exp_tabname, exp_table_def)

			# Set up a one-to-X join relationship between the two tables (join det_table:exp_id->exp_table:exp_id)
			db.define_default_join(det_tabname, exp_tabname,
				type = 'indirect',
				m1   = (det_tabname, "det_id"),
				m2   = (det_tabname, "exp_id"),
				_overwrite=create
				)
		else:
			det_table = db.table(det_tabname)
			exp_table = db.table(exp_tabname)

	det_c2f = gen_tab2fits(det_table_def)
	exp_c2f = gen_tab2fits(exp_table_def)

	t0 = time.time()
	at = 0; ntot = 0
	pool = pool2.Pool()
	smf_fns = []
	exp_ids = []
	for (file, exp_id, smf_fn, nloaded, nin) in pool.imap_unordered(smf_files, import_from_smf_aux, (det_table, exp_table, det_c2f, exp_c2f, survey), progress_callback=pool2.progress_pass):
		smf_fns.append(smf_fn)
		exp_ids.append(exp_id)
		at = at + 1
		ntot = ntot + nloaded
		t1 = time.time()
		time_pass = (t1 - t0) / 60
		time_tot = time_pass / at * len(smf_files)
		print >>sys.stderr, '  ===> Imported %s [%d/%d, %5.2f%%] +%-6d %9d (%.0f/%.0f min.)' % (file, at, len(smf_files), 100 * float(at) / len(smf_files), nloaded, ntot, time_pass, time_tot)
	del pool

	ret = colgroup.ColGroup()
	ret._EXP   = np.array(exp_ids, dtype=np.uint64)
	ret.smf_fn = np.array(smf_fns, dtype='a40')
	return ret
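
The return value packs the exposure IDs and source filenames of everything imported into a colgroup.ColGroup; note that dtype='a40' silently truncates SMF filenames longer than 40 bytes. A hypothetical caller-side sketch, assuming db and smf_files come from the surrounding context:

# Hypothetical caller-side sketch: pair each imported exposure ID with the
# SMF file it came from. `db` and `smf_files` are assumed from context.
ret = import_from_smf(db, 'ps1_det', 'ps1_exp', smf_files, 'ps1', create=True)
for exp_id, fn in zip(ret._EXP, ret.smf_fn):
    print('%20d  %s' % (exp_id, fn))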
Example #5
File: ptf.py Project: bsesar/lsd
def import_from_catalogs(db, det_tabname, exp_tabname, catalog_files, create=False, all=False):
	""" Import a PTF catalog from a collection of SExtractor catalog files.

	    Note: Assumes underlying shared storage for all output table
	          cells (i.e., any worker is able to write to any cell).
	"""
	with locking.lock(db.path[0] + "/.__smf-import-lock.lock"):
		if not db.table_exists(det_tabname) and create:
			# Set up commit hooks
			exp_table_def['commit_hooks'] = [ ('Updating neighbors', 1, 'lsd.smf', 'make_image_cache', [det_tabname]) ]

			# Create new tables
			det_table = db.create_table(det_tabname, det_table_def)
			exp_table = db.create_table(exp_tabname, exp_table_def)

			# Set up a one-to-X join relationship between the two tables (join det_table:exp_id->exp_table:exp_id)
			db.define_default_join(det_tabname, exp_tabname,
				type = 'indirect',
				m1   = (det_tabname, "det_id"),
				m2   = (det_tabname, "exp_id")
				)
		else:
			det_table = db.table(det_tabname)
			exp_table = db.table(exp_tabname)

	# MJD of import
	now = datetime.datetime.now()
	(djm, j) = sla_caldj(now.year, now.month, now.day)
	djm -= 55682

	t0 = time.time()
	at = 0; ntot = 0
	pool = pool2.Pool()
	explist_file = open('explist.txt','w')
	for (file, nloaded, error_type, expID) in pool.imap_unordered(catalog_files, import_from_catalogs_aux, (det_table, exp_table, djm, all), progress_callback=pool2.progress_pass):
		at = at + 1
		ntot = ntot + nloaded
		t1 = time.time()
		time_pass = (t1 - t0) / 60
		time_tot = time_pass / at * len(catalog_files)
#		sfile = (file)[-65:] if len(file) > 70 else file
		sfile = file
		if error_type == 0:
			print('  ===> Imported %s [%d/%d, %5.2f%%] +%-6d %9d (%.0f/%.0f min.)' % (sfile, at, len(catalog_files), 100 * float(at) / len(catalog_files), nloaded, ntot, time_pass, time_tot))
			explist_file.write('%s\n' % str(expID))
		elif error_type == 1:
			print('%s is missing!' % (sfile))
		elif error_type == 2:
			print('%s has bad data type in exposure database!' % (sfile))
		elif error_type == 3:
			print('%s has bad data type in detection database!' % (sfile))
		elif error_type == 4:
			print('%s has bad WCS transform parameters!' % (sfile))
		else:
			print "Nothing"

		# show the dirt every 100 ingests
#		if at % 100. == 0:
#			gc.collect()
#			n_collected = gc.collect()
#			if n_collected > 0:
#				dump_garbage()

	del pool
	explist_file.close()
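
The if/elif chain above maps the integer error_type codes returned by the worker to human-readable messages, with 0 as the success path. A hypothetical refactoring sketch that expresses the same mapping as a lookup table; not part of the original ptf.py:

# Hypothetical refactoring sketch of the error reporting above; not part
# of the original ptf.py.
ERROR_MESSAGES = {
    1: '%s is missing!',
    2: '%s has bad data type in exposure database!',
    3: '%s has bad data type in detection database!',
    4: '%s has bad WCS transform parameters!',
}

def report_error(sfile, error_type):
    # Fall back to a generic message for unrecognized codes.
    print(ERROR_MESSAGES.get(error_type, '%s: unrecognized error code') % sfile)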
Example #6
def import_from_smf(db,
                    det_tabname,
                    exp_tabname,
                    smf_files,
                    survey,
                    create=False):
    """ Import a PS1 table from DVO

	    Note: Assumes underlying shared storage for all table
	          cells (i.e., any worker is able to write to any cell).
	"""
    with locking.lock(db.path[0] + "/.__smf-import-lock.lock"):
        if not db.table_exists(det_tabname) and create:
            # Set up commit hooks
            exp_table_def['commit_hooks'] = [
                ('Updating neighbors', 1, 'lsd.smf', 'make_image_cache',
                 [det_tabname])
            ]

            # Create new tables
            det_table = db.create_table(det_tabname, det_table_def)
            exp_table = db.create_table(exp_tabname, exp_table_def)

            # Set up a one-to-X join relationship between the two tables (join det_table:exp_id->exp_table:exp_id)
            db.define_default_join(det_tabname,
                                   exp_tabname,
                                   type='indirect',
                                   m1=(det_tabname, "det_id"),
                                   m2=(det_tabname, "exp_id"),
                                   _overwrite=create)
        else:
            det_table = db.table(det_tabname)
            exp_table = db.table(exp_tabname)

    det_c2f = gen_tab2fits(det_table_def)
    exp_c2f = gen_tab2fits(exp_table_def)

    t0 = time.time()
    at = 0
    ntot = 0
    pool = pool2.Pool()
    smf_fns = []
    exp_ids = []
    for (file, exp_id, smf_fn, nloaded, nin) in pool.imap_unordered(
            smf_files, import_from_smf_aux,
            (det_table, exp_table, det_c2f, exp_c2f, survey),
            progress_callback=pool2.progress_pass):
        smf_fns.append(smf_fn)
        exp_ids.append(exp_id)
        at = at + 1
        ntot = ntot + nloaded
        t1 = time.time()
        time_pass = (t1 - t0) / 60
        time_tot = time_pass / at * len(smf_files)
        print >> sys.stderr, '  ===> Imported %s [%d/%d, %5.2f%%] +%-6d %9d (%.0f/%.0f min.)' % (
            file, at, len(smf_files), 100 * float(at) / len(smf_files),
            nloaded, ntot, time_pass, time_tot)
    del pool

    ret = colgroup.ColGroup()
    ret._EXP = np.array(exp_ids, dtype=np.uint64)
    ret.smf_fn = np.array(smf_fns, dtype='a40')
    return ret
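
This is the same import_from_smf as Example #4, run through an automatic code formatter (tabs replaced with four-space indents, call arguments wrapped). The print >> sys.stderr statement makes both variants Python 2 only; a Python 3 port would write print(..., file=sys.stderr) instead.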