Example #1
from apolo.data import objects
from apolo.test_tools.utils import which_tile


# Note: check_available_data() is assumed to be defined in the same module as
# this function; only the imports used elsewhere in these examples are added here.
def get_paths(cluster_name):
    """
    This function returns an iterable with cluster_pm_path, clean_tile_path
    :param cluster_name: name of the cluster as defined in data.objects
    :return:
    """

    # Get cluster object
    if cluster_name in objects.known_clusters:
        cl = objects.known_clusters[cluster_name]
    else:
        raise KeyError(
            f'Object {cluster_name} not instantiated in data.objects')

    # Get tiles where cluster is contained
    tiles = which_tile(cl, objects.all_tiles)

    # Check if cluster and tiles have available data
    available_clusters_files, available_tiles_files = check_available_data()

    if cluster_name in available_clusters_files:
        cl_file = available_clusters_files[cluster_name]
    else:
        raise FileNotFoundError(f'No available data for {cluster_name}.')

    tiles_file = []
    for tile in tiles:
        if tile in available_tiles_files:
            tiles_file.append(available_tiles_files[tile])
        else:
            raise FileNotFoundError(f'No available data for {tile}.')

    return [(cl_file, tile_file) for tile_file in tiles_file]
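
A minimal usage sketch for get_paths; the cluster name 'm81' and the print loop are illustrative assumptions, not part of the original code.

# Hypothetical usage: 'm81' is assumed to be a key of objects.known_clusters.
for cluster_pm_path, clean_tile_path in get_paths('m81'):
    print(cluster_pm_path, clean_tile_path)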
Example #2
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
from apolo.test_tools.routines import clustering_routine
from apolo.test_tools.utils import which_tile
from apolo.data import dirconfig, objects
import multiprocessing as mp
from os import path
from apolo.catalog_proc.utils import make_dir

"""
This script is used to perform radial test to measure the performance of the algorithm in different 
known cluster positions
"""

rs = 4.0

cluster_list = [objects.m81, objects.cl86, objects.cl74]
tile_list = which_tile(cluster_list, objects.all_tiles)

data_dir = dirconfig.cross_vvv_2mass_combis_gaia
out_dir = path.join(dirconfig.test_knowncl, f'radial_test_twocolors_{rs}x')
make_dir(out_dir)


models = [(cl, tile, 'carlos', data_dir, out_dir, rs)
          for cl, tile in zip(cluster_list, tile_list)]

with mp.Pool(3) as pool:
    pool.starmap(clustering_routine, models)
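
For reference, the starmap call above is equivalent to the sequential loop sketched below, which can be handy when debugging a single cluster; it only assumes that clustering_routine takes the positional arguments packed into each models tuple.

# Sequential alternative to pool.starmap(clustering_routine, models): starmap
# simply unpacks each tuple as positional arguments.
for model in models:
    clustering_routine(*model)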
Example #3
from apolo.data import dirconfig, objects
from apolo.test_tools import utils
from apolo.test_tools.utils import which_tile
from apolo.catalog_proc.utils import make_dir
from os import path
import numpy as np
"""
This script performs clustering on selected stellar clusters, using the given HDBSCAN
hyper-parameters and the selected parameter space.
"""

utils.check_base_data_structure()

complete_object_list = [
    objects.m81, objects.cl86, objects.cl74, objects.cl88, objects.pat94,
    objects.west1, objects.e_m81a, objects.e_cl86a, objects.e_cl74a,
    objects.e_cl88a, objects.e_pat94a, objects.e_west1a
]

complete_tile_list = which_tile(complete_object_list, objects.all_tiles)

# -------------------------------------------------------------------------------------------------------------------
# VVV 2MASS COMBIS GAIA using defined hyper-parameters

data_dir = dirconfig.cross_vvv_2mass_combis_gaia
out_dir = path.join(dirconfig.test_knowncl, 'clustering_no_grid')
#out_dir = '/home/jorge/sw_scores'
make_dir(out_dir)

object_list = [objects.m81]
tiles = which_tile(object_list, objects.all_tiles)

space_param = 'Colors+PM'
mcs = 9
ms = 9
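
The snippet above stops after defining the hyper-parameters. A minimal sketch of how they would presumably be applied is shown below, following the setup_region / do_hdbscan pattern of Example #4; the ctools import, the times=2.0 value, and the loop itself are assumptions, not the original script.

# Assumed continuation (sketch): apply the hyper-parameters above with the same
# calls used in Example #4. times=2.0 is illustrative.
from apolo.clustering import ctools

for obj, tile in zip(object_list, tiles):
    catalog_file = tile.get_file(data_dir)
    region = utils.setup_region(catalog_file, obj, times=2.0)
    ctools.do_hdbscan(region, space_param=space_param,
                      min_cluster_size=mcs, min_samples=ms)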
Example #4
from apolo.data import dirconfig, objects
from apolo.test_tools import utils
from apolo.clustering import cplots, ctools
"""
Simple script to perform a clustering run, selecting all the relevant parameters by hand.
"""

# Define the output dir
output_dir = dirconfig.test_knowncl

# Select stellar-cluster and respective tile
stellar_cluster = objects.cl74
tile = utils.which_tile(stellar_cluster, objects.all_tiles)[0]

# Define the catalog and the region (in terms of l and b) to be explored
catalog_dir = dirconfig.cross_vvv_2mass_combis_gaia

catalog_file = tile.get_file(
    catalog_dir
)  # This automatically finds the corresponding tile file inside catalog_dir
region = utils.setup_region(
    catalog_file, stellar_cluster,
    times=2.0)  # Only a region of 2 times the nominal SC radius

# Run the HDBSCAN clustering algorithm. This function updates the region table,
# adding two columns ('label' and 'probabilities') plus metadata about the clustering itself.
data, clusterer = ctools.do_hdbscan(
    region,
    space_param='Phot+PM',  # 'Phot+PM' 'Colors+PM'
    min_cluster_size=5,
    min_samples=13)
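
As the comment above notes, do_hdbscan adds 'label' and 'probabilities' columns to the region table, so the result can be inspected roughly as below; this is a sketch that assumes region supports column access by name and uses the standard HDBSCAN convention that label -1 marks noise.

# Rough inspection of the clustering output (sketch); assumes region['label']
# returns the new label column. Label -1 marks noise points (HDBSCAN convention).
labels = list(region['label'])
n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
print(f'Clusters found: {n_clusters}')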
Example #5
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
from apolo.test_tools.routines import clustering_routine
from apolo.test_tools.utils import which_tile
from apolo.data import dirconfig, objects
import multiprocessing as mp
from os import path
from apolo.catalog_proc.utils import make_dir
"""
This script is used to re-apply the clustering to our candidates.
"""

object_list = [
    objects.apolo01, objects.apolo02, objects.apolo03, objects.apolo04
]
tiles = which_tile(object_list, objects.all_tiles)

sp = 'Mini-alternative'
times = 6

data_dir = dirconfig.cross_vvv_2mass_combis_gaia
out_dir = path.join(dirconfig.test_candidates, f'test_r{times}')
make_dir(out_dir)

models = [(cl, tile, sp, data_dir, out_dir, times)
          for cl, tile in zip(object_list, tiles)]

with mp.Pool(mp.cpu_count() - 1) as pool:
    pool.starmap(clustering_routine, models)
Example #6
from apolo.data import dirconfig, objects
from apolo.test_tools import utils
from apolo.test_tools.utils import which_tile, setup_region
from apolo.catalog_proc.utils import make_dir
from apolo.clustering import cplots, ctools
from sklearn import metrics
from os import path
import numpy as np

"""
I wrote this script to obtain the sw scores needed to complete Table 3.2 of the thesis.
"""

utils.check_base_data_structure()

complete_object_list = [objects.m81, objects.cl86, objects.cl74]

complete_tile_list = which_tile(complete_object_list, objects.all_tiles)

data_dir = dirconfig.cross_vvv_2mass_combis_gaia
out_dir = '/home/jorge/sw_scores'
make_dir(out_dir)

far_end_cluster = objects.cl86
tile = which_tile(far_end_cluster, objects.all_tiles)[0]

# Alternatives: 'Colors+PM', 'Mini-alternative', 'Mini'
space_param = 'Mini-alternative'
mcs = 5
ms = 20

print(far_end_cluster, tile)
catalog_file = tile.get_file(data_dir)
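
The snippet ends right after loading the catalogue file. Below is a hedged sketch of a presumable continuation, reusing the setup_region / do_hdbscan pattern of Example #4 and sklearn's silhouette_score; the times value, the noise mask, and the assumptions that data is the clustered feature matrix and that clusterer exposes hdbscan's labels_ attribute are mine, not the original author's.

# Sketch of an assumed continuation: cluster the region around far_end_cluster
# and compute a silhouette score on the non-noise points (label -1 is noise).
region = setup_region(catalog_file, far_end_cluster, times=2.0)  # times is illustrative
data, clusterer = ctools.do_hdbscan(region, space_param=space_param,
                                    min_cluster_size=mcs, min_samples=ms)
mask = clusterer.labels_ != -1
score = metrics.silhouette_score(data[mask], clusterer.labels_[mask])
print(f'silhouette score for {far_end_cluster}: {score:.3f}')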