def test_genvpdbs():
    """Build vantage point dbs on disk, then confirm they can be reloaded.

    Relies on light curves already existing in the temp dir (see
    test_makelcs); cleans up the temp dir when done.
    """
    lc_temp_dir = TEMP_DIR + LIGHT_CURVES_DIR
    db_temp_dir = TEMP_DIR + DB_DIR

    # Create 10 vantage point database files
    genvpdbs.create_vpdbs(10, lc_temp_dir, db_temp_dir)

    # With vantage points created, we should have a fully created dataset.
    # (Plain `assert not x` instead of `== False` per PEP 8.)
    assert not simsearch.need_to_rebuild(lc_temp_dir, db_temp_dir)

    # Load vantage points back in from disk; one entry per vantage point
    vp_dict = simsearch.load_vp_lcs(db_temp_dir, lc_temp_dir)
    assert len(vp_dict) == 10

    clear_dir(TEMP_DIR, recreate=False)
def test_cmd_line_util():
    """Exercise simsearchutil's command-line parser under several flag sets.

    sys.argv is mutated to simulate different invocations. Unlike the
    original version, argv and the cleanup steps are wrapped in try/finally
    so a failure mid-test no longer leaks global state into later tests.
    """
    os.chdir('cs207project/tsrbtreedb')
    original_argv = sys.argv
    try:
        # Bare invocation with whatever argv pytest was started with
        _ = simsearchutil.cmd_line_util()

        # Add the -r and -d flags (cumulative, matching original behavior)
        sys.argv = sys.argv + ["-r", "-d"]
        _ = simsearchutil.cmd_line_util()

        # Add -h on top of the previous flags (still cumulative)
        sys.argv = sys.argv + ["-h"]
        _ = simsearchutil.cmd_line_util()
    finally:
        # Restore argv and undo directory changes even if a call above raises
        sys.argv = original_argv
        clear_dir(LIGHT_CURVES_DIR, recreate=False)
        clear_dir(DB_DIR, recreate=False)
        os.chdir(os.pardir)
        os.chdir(os.pardir)
def test_makelcs():
    """Generate light curve files and verify rebuild detection and reload."""
    lc_temp_dir = TEMP_DIR + LIGHT_CURVES_DIR
    db_temp_dir = TEMP_DIR + DB_DIR

    # Before we've generated anything, confirm that our rebuild detector
    # method properly detects that we need to rebuild.
    # (Plain `assert x` instead of `== True` per PEP 8.)
    assert simsearch.need_to_rebuild(lc_temp_dir, db_temp_dir)

    makelcs.make_lcs_wfm(5, lc_temp_dir)
    # Light curves alone (no vantage point dbs yet) still require a rebuild
    assert simsearch.need_to_rebuild(lc_temp_dir, db_temp_dir)

    clear_dir(TEMP_DIR, recreate=False)

    # Generate 100 light curves in temp dir
    makelcs.make_lcs_wfm(100, lc_temp_dir)
    assert simsearch.need_to_rebuild(lc_temp_dir, db_temp_dir)

    # Load them back in from disk and confirm one entry per light curve
    lc_dict = genvpdbs.load_ts_fsm(lc_temp_dir)
    assert len(lc_dict) == 100
def create_vpdbs(n, lc_dir, db_dir):
    """Create vantage point databases in parallel.

    Executes functions above:
    (1) Creates timeseries_dict from time series files on disk
    (2) Picks n vantage points at random
    (3) Using a process pool for parallelization, calculates kernel distance
        between vantage points and generated time series
    (4) Saves kernel distance indexes to disk as red-black tree databases

    Args:
        n: number of vantage point databases to create
        lc_dir: directory containing the light curve (time series) files
        db_dir: output directory for the vantage point dbs (cleared first)
    """
    print("Creating %d vantage point dbs" % n, end="")
    timeseries_dict = load_ts_fsm(lc_dir)
    vantage_points = pick_vantage_points(timeseries_dict, n)
    clear_dir(db_dir)

    # This is inelegant, but needed to use the map function below where
    # we can effectively only pass one argument to the worker process
    vp_tuples = [(vp, timeseries_dict, db_dir) for vp in vantage_points]

    # Build vantage point dbs in parallel (up to the number of processes on
    # your machine). pool.map returns a lazy iterator; consuming it with
    # list() ensures any exception raised in a worker is re-raised here
    # instead of being silently dropped.
    with ProcessPoolExecutor() as pool:
        list(pool.map(save_vp_dbs, vp_tuples))
    print("....................Done.")
def test_setup_temp_dir():
    """Reset the temp directory, then make it the current working directory."""
    clear_dir(TEMP_DIR)
    os.chdir(TEMP_DIR)
def test_clear_temp_dir():
    """Step back out of the temp dir and remove it without recreating it."""
    os.chdir(os.pardir)
    clear_dir(TEMP_DIR, recreate=False)
def test_clear():
    """Delete the test temp directory entirely (no recreation)."""
    clear_dir(TEMP_DIR, recreate=False)