Example #1
0
    def get_cluster_info_from_user(self):
        """Interactively populate this cluster's attributes.

        Prompts for the cluster name and observation ids, builds the
        Observation objects, and then either asks for the fitting values
        (hydrogen column density, redshift, abundance) or fills them with
        placeholder reminder strings so the user can edit the config file
        later. Finishes by marking pipeline step 1 as completed.
        """
        self.name = io.get_user_input("Enter the cluster name: ", "cluster name")
        self.data_directory = os.path.normpath(config.data_directory())

        self.observation_ids = get_observation_ids()
        self.observations = [
            Observation(obsid=obsid, cluster=self) for obsid in self.observation_ids
        ]
        print()

        wants_fitting_values = io.check_yes_no(
            "Enter values for fitting (nH, z, abundance) now? [y/n]")
        if not wants_fitting_values:
            # User deferred: store reminder placeholders and tell them where
            # to update the values before the ACB stage.
            print("Before completing the ACB portion of the pypeline, you need "
                  "to edit the configuration file ({config}) "
                  "and update the values for hydrogen column density, redshift, "
                  "and abundance.".format(config=self.configuration_filename))
            self.hydrogen_column_density = "Update me! (on order of 10^22 e.g. 0.052 for 5.2e20)"
            self.redshift = "Update me! (e.g. 0.192)"
            self.abundance = "Update me! (e.g. 0.2)"
        else:
            self.hydrogen_column_density = io.get_user_input(
                "Enter the hydrogen column density for {} (on order of 10^22, e.g. 0.052 for 5.2e20): ".format(self.name),
                "hydrogen column density")
            self.redshift = io.get_user_input(
                "Enter the redshift of {}: ".format(self.name), "redshift")
            self.abundance = io.get_user_input("Enter the abundance: ", "abundance")

        self._last_step_completed = 1

        return
Example #2
0
def get_cluster_config(clstr_name):
    """Return the path to the pypeline config file for cluster *clstr_name*.

    Looks for ``<data_dir><clstr_name>/<clstr_name>_pypeline_config.ini``
    under the configured data directory.

    Returns:
        The last matching path (most recent match), or None when no
        configuration file exists for the cluster.
    """
    data_dir = config.data_directory()
    matches = io.get_filename_matching(
        '{0}{1}/{1}_pypeline_config.ini'.format(data_dir, clstr_name))
    # get_filename_matching returns a list of paths; rely on truthiness
    # instead of the verbose `len(...) >= 1` check.
    return matches[-1] if matches else None
Example #3
0
def get_cluster_configs(data_dir=None):
    """Find all cluster pypeline configuration files under *data_dir*.

    Scans each subdirectory of *data_dir* for a ``*_pypeline_config.ini``
    file, taking the first glob match per subdirectory.

    Args:
        data_dir: Directory containing one subdirectory per cluster.
            Defaults to ``config.data_directory()``, evaluated per call.

    Returns:
        A list of (cluster_name, config_file_path) tuples.
    """
    # Bug fix: the old default `data_dir=config.data_directory()` was
    # evaluated ONCE at import time, silently ignoring any configuration
    # change made afterwards. Evaluate lazily instead.
    if data_dir is None:
        data_dir = config.data_directory()
    configuration_files = []
    cluster_names = []
    for directory in os.listdir(data_dir):
        config_file = glob.glob(
            "{data_dir}/{directory}/*_pypeline_config.ini".format(
                data_dir=data_dir, directory=directory))
        if config_file:
            configuration_files.append(config_file[0])
            cluster_names.append(
                get_cluster_name_from_config_file(config_file[0]))
    return list(zip(cluster_names, configuration_files))
Example #4
0
def initialize_cluster(name="", obsids=None, abundance=0.3, redshift=0.0, nH=0.0):
    """Create a cluster, download its data, and merge its observations.

    Runs pipeline steps 1-3: make the directory layout, download the
    observation data, and merge the observations, updating
    ``last_step_completed`` after each stage.

    Args:
        name: Cluster name used for the directory layout.
        obsids: Iterable of observation ids; defaults to an empty list.
        abundance: Metal abundance used for fitting.
        redshift: Cluster redshift.
        nH: Hydrogen column density.
    """
    # Bug fix: `obsids=[]` is a mutable default shared across calls —
    # any in-place mutation would leak into subsequent invocations.
    if obsids is None:
        obsids = []
    clstr = cluster.ClusterObj(name=name, observation_ids=obsids, abundance=abundance,
                               redshift=redshift, hydrogen_column_density=nH,
                               data_directory=config.data_directory())
    print('Making initial cluster directory: {}'.format(clstr.directory))
    io.make_directory(clstr.directory)
    io.make_initial_directories(clstr)
    clstr.last_step_completed = 1
    print("Downloading cluster data.")
    download_data(clstr)
    clstr.last_step_completed = 2
    print("Merging observations.")
    merge_observations(clstr)
    clstr.last_step_completed = 3
Example #5
0
def load_fashionmnist(batch_size, shuffle, random_seed=None):
    """Build train/validation/test DataLoaders for FashionMNIST.

    Downloads the dataset if needed, normalizes it with the module-level
    mean/std constants, and carves the last N_VALID training examples off
    as the validation split (optionally shuffling the index order first
    with *random_seed*).

    Args:
        batch_size: Batch size for all three loaders.
        shuffle: Whether to shuffle the train/valid index split.
        random_seed: Seed for the shuffle; None gives a fresh RNG state.

    Returns:
        (train_loader, valid_loader, test_loader) tuple.
    """
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=FashionMNIST_NORM_MEAN,
                             std=FashionMNIST_NORM_STD),
    ])
    root = os.path.join(data_directory(), 'FashionMNIST')

    train_data = torchvision.datasets.FashionMNIST(
        root=root, train=True, download=True, transform=preprocess)
    test_data = torchvision.datasets.FashionMNIST(
        root=root, train=False, download=True, transform=preprocess)

    indices = list(range(len(train_data)))
    if shuffle:
        np.random.RandomState(random_seed).shuffle(indices)
    # The last N_VALID indices become the validation split.
    train_sampler = SubsetRandomSampler(indices[:-N_VALID])
    valid_sampler = SubsetRandomSampler(indices[-N_VALID:])

    def _make_loader(dataset, **kwargs):
        # All loaders share batch size, worker count, and pinning.
        return torch.utils.data.DataLoader(dataset,
                                           batch_size=batch_size,
                                           num_workers=NUM_WORKERS,
                                           pin_memory=PIN_MEMORY,
                                           **kwargs)

    train_loader = _make_loader(train_data, sampler=train_sampler)
    valid_loader = _make_loader(train_data, sampler=valid_sampler)
    test_loader = _make_loader(test_data, shuffle=False)

    return train_loader, valid_loader, test_loader
Example #6
0
def automated_cluster_init(batch_file):
    """Initialize every cluster described in a CSV batch file.

    For each row in *batch_file*, builds a ClusterObj, creates its
    directory layout, downloads its data, and merges its observations —
    the non-interactive equivalent of pipeline steps 1-3 — updating
    ``last_step_completed`` after each stage.
    """
    print("Automated cluster initialization using: {batch_file}".format(batch_file=batch_file))
    data_directory = config.data_directory()
    for entry in io.get_cluster_info_from_csv(batch_file):
        clstr_obj = cluster.ClusterObj(
            name=entry['name'],
            observation_ids=entry['obsids'],
            data_directory=data_directory,
            abundance=entry['abundance'],
            redshift=entry['redshift'],
            hydrogen_column_density=entry['hydrogen_column_density'],
        )

        io.make_directory(clstr_obj.directory)
        clstr_obj.write_cluster_data()
        io.make_initial_directories(clstr_obj)
        clstr_obj.last_step_completed = 1
        download_data(clstr_obj)
        clstr_obj.last_step_completed = 2
        merge_observations(clstr_obj)
        clstr_obj.last_step_completed = 3