def setup(self, **kwargs):

    """
    This function performs the setup of the interpolator.
    :param kwargs: optional "frame", "region" or "regions" input
    :return:
    """

    # Call the setup function of the base class
    super(Interpolator, self).setup(**kwargs)

    # Set the input path
    if config.input is not None: input_path = fs.absolute_or_in_cwd(config.input)
    else: input_path = fs.cwd()

    # Set the output path
    if config.output is not None: output_path = fs.absolute_or_in_cwd(config.output)
    else: output_path = fs.cwd()

    # Load the frame, or take it from the input
    if "frame" in kwargs: self.frame = kwargs.pop("frame")
    else: self.load_frame()

    # Load the regions, or take them from the input
    if "region" in kwargs: self.regions = [kwargs.pop("region")]
    elif "regions" in kwargs: self.regions = kwargs.pop("regions")
    else: self.load_regions()
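# -----------------------------------------------------------------

# Hedged usage sketch (assumptions: Interpolator follows the usual PTS
# Configurable pattern where run(**kwargs) forwards its input to setup(**kwargs);
# the Frame import path follows PTS conventions and is not taken from this excerpt):
#
#   from pts.magic.core.frame import Frame
#   frame = Frame.from_file("image.fits")
#   interpolator = Interpolator()
#   interpolator.run(frame=frame)   # skips load_frame(), uses the given frame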
def __init__(self, command, description, settings, input_dict, cwd=None, finish=None, pts_settings=None):

    """
    The constructor ...
    :param command:
    :param description:
    :param settings: dictionary
    :param input_dict: dictionary
    :param cwd:
    :param finish:
    :param pts_settings:
    """

    # Set the working directory
    if cwd is None:
        self.cwd_specified = False
        cwd = fs.cwd()
    else: self.cwd_specified = True

    # Set attributes
    self.command = command
    self.description = description
    self.settings = settings
    self.input_dict = input_dict
    self.cwd = cwd
    self.finish = finish
    self.pts_settings = pts_settings
def __init__(self, command, description, settings, input_dict, cwd=None, finish=None):

    """
    The constructor ...
    :param command:
    :param description:
    :param settings:
    :param input_dict:
    :param cwd:
    :param finish:
    """

    # Set the working directory
    if cwd is None:
        self.cwd_specified = False
        cwd = fs.cwd()
    else: self.cwd_specified = True

    # Set attributes
    self.command = command
    self.description = description
    self.settings = settings
    self.input_dict = input_dict
    self.cwd = cwd
    self.finish = finish
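# -----------------------------------------------------------------

# Hedged usage sketch (the class name 'PTSCommand' is hypothetical; only the
# constructor signature above comes from the source): when cwd is omitted, the
# current working directory is recorded and cwd_specified stays False.
#
# command = PTSCommand("fits_to_png", "convert a FITS frame to PNG",
#                      settings={"show": False}, input_dict=dict())
# assert not command.cwd_specified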
from pts.dustpedia.core.sample import DustPediaSample
from pts.magic.misc.dustpedia import DustPediaDatabase, get_account
from pts.core.tools import filesystem as fs
from pts.core.tools.stringify import tostr
from pts.core.basics.configuration import ConfigurationDefinition, parse_arguments

# -----------------------------------------------------------------

definition = ConfigurationDefinition()
definition.add_required("galaxy_name", "string", "galaxy name")

# Parse the command-line arguments into a configuration
config = parse_arguments("get_mbb_parameters", definition)

# -----------------------------------------------------------------

path = fs.cwd()

# -----------------------------------------------------------------

# Create the DustPedia sample
sample = DustPediaSample()
galaxy_name = sample.get_name(config.galaxy_name)

# Create the database
database = DustPediaDatabase()

# Login
username, password = get_account()
database.login(username, password)

# Get the parameters
def run_configurable(table_matches, args, tables):

    """
    This function ...
    :param table_matches:
    :param args:
    :param tables:
    :return:
    """

    # Determine the configuration method
    configuration_method = None
    if args.interactive: configuration_method = "interactive"
    elif args.arguments: configuration_method = "arguments"
    elif args.configfile is not None: configuration_method = "file:" + args.configfile
    elif args.rerun: configuration_method = "last"

    # Regenerate the configuration method option
    if args.interactive: configuration_method_argument = "--interactive"
    elif args.arguments: configuration_method_argument = "--arguments"
    elif args.configfile is not None: configuration_method_argument = "--configfile '" + args.configfile + "'"
    elif args.rerun: configuration_method_argument = "--rerun"
    else: configuration_method_argument = ""

    # Resolve
    subproject, index = table_matches[0]
    resolved = introspection.resolve_from_match(subproject, tables[subproject], index)

    # Get properties
    title = resolved.title
    command_name = resolved.command_name
    hidden = resolved.hidden
    description = resolved.description
    module_path = resolved.module_path
    class_name = resolved.class_name
    configuration_method_table = resolved.configuration_method
    configuration_module_path = resolved.configuration_module_path
    subproject_path = introspection.pts_subproject_dir(subproject)

    # Set
    sys.argv[0] = fs.join(introspection.pts_root_dir, module_path.replace(".", "/") + ".py") # this is actually not necessary (and not really correct, it's not like we are calling the module where the class is..)
    del sys.argv[1] # but this is important

    # Get a list of the leftover arguments
    leftover_arguments = sys.argv[1:]

    # Welcome message
    if subproject == "modeling": welcome_modeling()
    elif subproject == "magic": welcome_magic()
    elif subproject == "dustpedia": welcome_dustpedia()
    elif subproject == "evolve": welcome_evolve()

    # Special
    if subproject == "modeling": check_modeling_cwd(command_name, fs.cwd())

    # Get the configuration definition
    definition = introspection.get_configuration_definition_pts_not_yet_in_pythonpath(configuration_module_path)

    # If not specified on the command line (before the command name), then use the default specified in the commands.dat file
    if configuration_method is None: configuration_method = configuration_method_table

    # Check whether arguments are passed and the configuration method is interactive
    if configuration_method == "interactive" and len(leftover_arguments) > 0: raise ValueError("Arguments on the command-line are not supported by default for this command. Run with pts --arguments to change this behaviour.")

    # Create the configuration
    config = create_configuration(definition, command_name, description, configuration_method)

    ## SAVE THE CONFIG if requested
    if config.write_config:
        config_filepath = config.config_file_path(command_name)
        config.saveto(config_filepath)
    else: config_filepath = None

    # If this is not a re-run
    if not args.rerun:

        if not fs.is_directory(introspection.pts_user_config_dir): fs.create_directory(introspection.pts_user_config_dir)

        # CACHE THE CONFIG
        config_cache_path = fs.join(introspection.pts_user_config_dir, command_name + ".cfg")
        config.saveto(config_cache_path)

    # Setup function
    if subproject == "modeling": setup_modeling(command_name, fs.cwd(), configuration_method_argument)
    elif subproject == "magic": setup_magic(command_name, fs.cwd())
    elif subproject == "dustpedia": setup_dustpedia(command_name, fs.cwd())
    elif subproject == "evolve": setup_evolve(command_name, fs.cwd())

    # Initialize the logger
    log = initialize_pts(config, remote=args.remote, command_name=command_name)

    # Exact command name
    exact_command_name = subproject + "/" + command_name

    # If the PTS command has to be executed remotely
    if args.remote is not None: run_remotely(exact_command_name, config, args.keep, args.remote, log)

    # The PTS command has to be executed locally
    else: run_locally(exact_command_name, module_path, class_name, config, args.input_files, args.output_files, args.output, log)

    # Finish function
    if subproject == "modeling": finish_modeling(command_name, fs.cwd(), config_path=config_filepath)
    elif subproject == "magic": finish_magic(command_name, fs.cwd())
    elif subproject == "dustpedia": finish_dustpedia(command_name, fs.cwd())
    elif subproject == "evolve": finish_evolve(command_name, fs.cwd())
"--keep", action="store_true", help= "add this option to make sure the output is kept remotely after a subsequent retrieve attempt" ) # Parse the command line arguments arguments = parser.parse_args() # ----------------------------------------------------------------- # Create the remote execution environment remote = Remote() remote.setup(arguments.remote) for path in fs.files_in_path(fs.cwd(), extension="sim"): # Create simulation sim = RemoteSimulation.from_file(path) if not sim.retrieved: continue local_output_path = sim.output_path remote_simulation_path = sim.remote_simulation_path remote_output_path = sim.remote_output_path remote_input_path = sim.remote_input_path if not remote.is_directory(remote_simulation_path): remote.create_directory(remote_simulation_path) if not remote.is_directory(remote_output_path):
# **       © Astronomical Observatory, Ghent University          **
# *****************************************************************

from pts.core.basics.configuration import ConfigurationDefinition
from pts.core.tools import filesystem as fs
from pts.core.launch.options import LoggingOptions, AnalysisOptions
from pts.core.simulation.output import output_type_choices

# -----------------------------------------------------------------

# Create the configuration
definition = ConfigurationDefinition()

# Input and output
definition.add_optional("simulation_input", "directory_path", "input directory for the simulation(s)", letter="i")
definition.add_optional("simulation_output", "directory_path", "output directory for the simulation(s)", fs.cwd(), letter="o", convert_default=True)

# Various flags
definition.add_flag("relative", "treats the given input and output paths as being relative to the ski/fski file")
definition.add_flag("emulate", "emulate the simulation while limiting computation")

# Other
definition.add_flag("keep", "keep remote input and output")
definition.add_flag("keep_input", "keep remote input specifically")
definition.add_optional("retrieve_types", "string_list", "types of output files that have to be retrieved/retained (None means everything)", choices=output_type_choices)

# Special things
definition.add_flag("dry", "dry run (don't actually launch the simulations)", False)
definition.add_flag("attached", "launch the simulations in attached mode (only works if remotes without scheduling system are used)")

# Logging options
# Create the configuration definition
definition = ConfigurationDefinition()

# Add optional arguments
definition.add_optional("image", str, "the name of the image for which to run the initialization")
definition.add_flag("visualise", "make visualisations")

# Get configuration
reader = ConfigurationReader("initialize_preparation")
config = reader.read(definition)

# -----------------------------------------------------------------

# Determine the log file path
logfile_path = fs.join(fs.cwd(), "log", time.unique_name("log") + ".txt") if config.report else None

# Determine the log level
level = "DEBUG" if config.debug else "INFO"

# Initialize the logger
log = logging.setup_log(level=level, path=logfile_path)
log.start("Starting initialize_preparation ...")

# -----------------------------------------------------------------

names_column = []
paths_column = []
prep_names_column = []
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# **        PTS -- Python Toolkit for working with SKIRT         **
# **       © Astronomical Observatory, Ghent University          **
# *****************************************************************

# Import the relevant PTS classes and modules
from pts.core.basics.configuration import ConfigurationDefinition
from pts.modeling.fitting.component import get_generation_names, get_last_finished_generation
from pts.core.tools import filesystem as fs

# -----------------------------------------------------------------

# Create the configuration
definition = ConfigurationDefinition(log_path="log", config_path="config")

# Add settings
definition.add_positional_optional("features", "string_list", "features to be plotted")
definition.add_positional_optional("generation", "string", "generation for which to plot the features", choices=get_generation_names(fs.cwd()), default=get_last_finished_generation(fs.cwd()))

# -----------------------------------------------------------------
# -*- coding: utf8 -*-
# *****************************************************************
# **        PTS -- Python Toolkit for working with SKIRT         **
# **       © Astronomical Observatory, Ghent University          **
# *****************************************************************

## \package pts.do.magic.get_ukidss Get UKIDSS images for a particular galaxy.

# -----------------------------------------------------------------

# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function

# Import the relevant PTS classes and modules
from pts.core.basics.configuration import ConfigurationDefinition, parse_arguments
from pts.magic.services.ukidss import UKIDSS
from pts.core.tools import filesystem as fs

# -----------------------------------------------------------------

definition = ConfigurationDefinition()
definition.add_required("galaxy_name", "string", "galaxy name")
config = parse_arguments("get_ukidss", definition)

# -----------------------------------------------------------------

ukidss = UKIDSS()
ukidss.download_images(config.galaxy_name, fs.cwd())

# -----------------------------------------------------------------
from pts.magic.tools import catalogs

# -----------------------------------------------------------------

# Create the command-line parser
parser = argparse.ArgumentParser()
parser.add_argument("galaxies", type=str, help="the name of the file containing the galaxy catalog", nargs='?', default="galaxies.dat")
parser.add_argument("stars", type=str, help="the name of the file containing the stellar catalog", nargs='?', default="stars.dat")
parser.add_argument("--debug", action="store_true", help="debug mode")

# Parse the command line arguments
arguments = parser.parse_args()

# -----------------------------------------------------------------

directory_path = fs.cwd()

# Determine the full paths to the galaxy and star catalogs
galactic_catalog_path = fs.join(directory_path, arguments.galaxies)
stellar_catalog_path = fs.join(directory_path, arguments.stars)

# Open the galactic catalog (to get the name of the principal galaxy)
galactic_catalog = tables.from_file(galactic_catalog_path)
galaxy_name = None

# Loop over the entries of the galactic catalog
for i in range(len(galactic_catalog)):

    if galactic_catalog["Principal"][i]:
# Local path
definition.add_positional_optional("local_path", "string", "path of the local directory to store the file/directory")

# Create configuration
config = parse_arguments("retrieve", definition, "Retrieve a file or directory from a remote host")

# -----------------------------------------------------------------

# Create remote
remote = Remote(host_id=config.remote)

# -----------------------------------------------------------------

# Set full path of origin
origin = remote.absolute_path(config.remote_path)

# Set full path to the destination
if config.local_path is not None: destination = fs.absolute_or_in_cwd(config.local_path)
else: destination = fs.cwd()

# -----------------------------------------------------------------

# Debugging
log.debug("Origin: " + origin)
log.debug("Destination: " + destination)

# Download
remote.download(origin, destination)

# -----------------------------------------------------------------
# -----------------------------------------------------------------

# Determine the full path to the parameter file
arguments.filepath = fs.absolute(arguments.file)

# Determine the full paths to the input and output directories
if arguments.input is not None: arguments.input_path = fs.absolute(arguments.input)
if arguments.output is not None: arguments.output_path = fs.absolute(arguments.output)

# -----------------------------------------------------------------

# Determine the log file path
logfile_path = fs.join(fs.cwd(), time.unique_name("launch") + ".txt") if arguments.report else None

# Determine the log level
level = "DEBUG" if arguments.debug else "INFO"

# Initialize the logger
log = logging.setup_log(level=level, path=logfile_path)
log.start("Starting launch ...")

# -----------------------------------------------------------------

# Either create a SkirtRemoteLauncher or a SkirtLauncher
if arguments.remote: launcher = SkirtRemoteLauncher.from_arguments(arguments)
else: launcher = SkirtLauncher.from_arguments(arguments)
# Galaxy name
definition.add_required("galaxy_name", str, "the name of the galaxy")
definition.add_required("band", str, "the band (GALEX or SDSS u/g/r/i/z)")

# Optional
definition.add_optional("remote", str, "the remote host name", None)

# Get configuration
reader = ConfigurationReader("get_poisson_errors", "Calculate Poisson error maps for DustPedia UV and optical images")
config = reader.read(definition)
arguments = reader.get_arguments()

# -----------------------------------------------------------------

# Determine the log file path
logfile_path = fs.join(fs.cwd(), time.unique_name("log") + ".txt") if config.report else None

# Determine the log level
level = "DEBUG" if config.debug else "INFO"

# Initialize the logger
log = logging.setup_log(level=level, path=logfile_path)
log.start("Starting get_poisson_errors ...")

# -----------------------------------------------------------------

temp_name = time.unique_name(config.band.replace(" ", ""))

# -----------------------------------------------------------------

# Remotely
# Create the command-line parser
parser = argparse.ArgumentParser()
parser.add_argument("remote", nargs='?', default=None, help="the name of the remote host to connect to")
parser.add_argument("ids", type=parsing.simulation_ids, help="unretrieve the simulations with these IDs")
parser.add_argument("--keep", action="store_true", help="add this option to make sure the output is kept remotely after a subsequent retrieve attempt")

# Parse the command line arguments
arguments = parser.parse_args()

# -----------------------------------------------------------------

# Create the remote execution environment
remote = Remote()
remote.setup(arguments.remote)

# Loop over the simulation files in the current working directory
for path in fs.files_in_path(fs.cwd(), extension="sim"):

    # Create simulation
    sim = RemoteSimulation.from_file(path)
    if not sim.retrieved: continue

    local_output_path = sim.output_path
    remote_simulation_path = sim.remote_simulation_path
    remote_output_path = sim.remote_output_path
    remote_input_path = sim.remote_input_path

    if not remote.is_directory(remote_simulation_path): remote.create_directory(remote_simulation_path)
    if not remote.is_directory(remote_output_path): remote.create_directory(remote_output_path)
config = parse_arguments("scaling_plots", definition) # ----------------------------------------------------------------- # Set figsize if config.small: figsize = "8,6" figsize_timelines = "8,8" else: figsize = "12,9" figsize_timelines = "12,12" # ----------------------------------------------------------------- # Locate the scaling test suite directory suite_path = fs.join(fs.cwd(), config.suite_name) if not fs.is_directory(suite_path): raise ValueError("The directory '" + suite_path + "' does not exist") # ----------------------------------------------------------------- # Make directory for output output_path = fs.create_directory_in(fs.cwd(), time.unique_name("scaling_plots")) # Make subdirectories single_node_path = fs.create_directory_in(output_path, "Single-node comparison") multi_node_path = fs.create_directory_in( output_path, "Load balancing and multi-node scaling") communication_path = fs.create_directory_in(output_path, "Communication")
# -----------------------------------------------------------------

# Create the configuration definition
definition = ConfigurationDefinition()

# Add flags
definition.add_flag("table", "save the extracted progress table")

# Get configuration
reader = ConfigurationReader("plotprogress")
config = reader.read(definition)

# -----------------------------------------------------------------

# Look for a file in the current working directory that contains extracted progress information
progress_table_path = fs.join(fs.cwd(), "progress.dat")
if fs.is_file(progress_table_path): table = ProgressTable.from_file(progress_table_path)

# If extracted progress information is not present, first perform the extraction
else:

    # Create a SkirtSimulation object based on a log file present in the current working directory
    simulation = createsimulations(single=True)

    # Create a new ProgressExtractor instance
    extractor = ProgressExtractor()

    # Run the extractor and get the table
    table = extractor.run(simulation)
definition = ConfigurationDefinition()
definition.add_positional_optional("seed", int, "the random seed", 4357)

# Get configuration
reader = ConfigurationReader("explore")
config = reader.read(definition)

# -----------------------------------------------------------------

seed = config.seed
prng = setup_prng(seed)

# -----------------------------------------------------------------

# Path to the GA object
path = fs.join(fs.cwd(), "ga.pickle")

# Path to the parameter table
parameters_path = fs.join(fs.cwd(), "parameters.dat")

# -----------------------------------------------------------------

# Inform the user
log.info("Creating the GA engine ...")

# Genome instance
genome = G1DList(2)
genome.setParams(rangemin=0., rangemax=50., bestrawscore=0.00, rounddecimal=2)
genome.initializator.set(initializators.G1DListInitializatorReal)
genome.mutator.set(mutators.G1DListMutatorRealGaussian)
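# -----------------------------------------------------------------

# Hedged sketch (assumption: PTS's evolve package keeps the pyevolve-style API
# suggested by the G1DList / initializators / mutators calls above), showing how
# a fitness function would be attached to the genome before building the engine;
# the quadratic form below is purely illustrative.
#
# def fitness(genome):
#     a, b = genome.genomeList[0], genome.genomeList[1]
#     return (a - 20.)**2 + (b - 10.)**2
#
# genome.evaluator.set(fitness)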
# -----------------------------------------------------------------

# Create the configuration definition
definition = ConfigurationDefinition()

# Add flags
definition.add_flag("table", "save the extracted memory table")

# Get the configuration
config = parse_arguments("plotmemory", definition)

# -----------------------------------------------------------------

# Look for a file in the current working directory that contains extracted memory information
memory_table_path = fs.join(fs.cwd(), "memory.dat")
if fs.is_file(memory_table_path): table = MemoryUsageTable.from_file(memory_table_path)

# If extracted memory information is not present, first perform the extraction
else:

    # Create a SkirtSimulation object based on a log file present in the current working directory
    simulation = createsimulations(single=True)

    # Create a new MemoryExtractor instance
    extractor = MemoryExtractor()

    # Run the extractor and get the memory table
    table = extractor.run(simulation)
from pts.core.tools.random import load_state, save_state

# -----------------------------------------------------------------

last_generation = None

# Check the index of the last generation
for name in fs.directories_in_path():

    if "reference" in name or "original" in name: continue
    generation = int(name.split("Generation ")[1])
    if last_generation is None or generation > last_generation: last_generation = generation

# -----------------------------------------------------------------

if last_generation is None:

    generation_path = fs.cwd()
    print("Current generation: the initial population")

else:

    generation_path = fs.join(fs.cwd(), "Generation " + str(last_generation))
    print("Current generation: " + str(last_generation))

# -----------------------------------------------------------------

# Path to the current GA object
path = fs.join(generation_path, "ga.pickle")

# Path to the parameters table
parameters_path = fs.join(generation_path, "parameters.dat")

# Path to the chi squared table
chi_squared_path = fs.join(generation_path, "chi_squared.dat")
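# -----------------------------------------------------------------

# Illustrative directory layout assumed by the loop above (names inferred from
# the string handling, not stated explicitly in the source):
#
#   original/  reference/  Generation 1/  Generation 2/  ...
#
# so int(name.split("Generation ")[1]) recovers the generation index, while the
# "reference" and "original" directories are skipped explicitly.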
definition.add_required("band", str, "the band (GALEX or SDSS u/g/r/i/z)") # Optional definition.add_optional("remote", str, "the remote host name", None) # Get configuration reader = ConfigurationReader( "get_poisson_errors", "Calculate poisson error maps for DustPedia UV and optical images") config = reader.read(definition) arguments = reader.get_arguments() # ----------------------------------------------------------------- # Determine the log file path logfile_path = fs.join(fs.cwd(), time.unique_name("log") + ".txt") if config.report else None # Determine the log level level = "DEBUG" if config.debug else "INFO" # Initialize the logger log = logging.setup_log(level=level, path=logfile_path) log.start("Starting get_poisson_errors ...") # ----------------------------------------------------------------- temp_name = time.unique_name(config.band.replace(" ", "")) # -----------------------------------------------------------------
help="the name of the file containing the galaxy catalog", nargs='?', default="galaxies.dat") parser.add_argument("stars", type=str, help="the name of the file containing the stellar catalog", nargs='?', default="stars.dat") parser.add_argument("--debug", type=str, help="debug mode") # Parse the command line arguments arguments = parser.parse_args() # ----------------------------------------------------------------- directory_path = fs.cwd() # Determine the full paths to the galaxy and star catalog galactic_catalog_path = fs.join(directory_path, arguments.galaxies) stellar_catalog_path = fs.join(directory_path, arguments.stars) # Open the galactic catalog (to get the name of the principal galaxy) galactic_catalog = tables.from_file(galactic_catalog_path) galaxy_name = None # Loop over the entries of the galactic catalog for i in range(len(galactic_catalog)): if galactic_catalog["Principal"][i]:
## \package pts.do.modeling.plot_mappings Plot MAPPINGS examples.

# -----------------------------------------------------------------

# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function

# Import the relevant PTS classes and modules
from pts.core.basics.configuration import ConfigurationDefinition, parse_arguments
from pts.core.tools import filesystem as fs
from pts.modeling.misc.playground import plot_mappings_examples
from pts.core.basics.log import log

# -----------------------------------------------------------------

# Create configuration
definition = ConfigurationDefinition()
definition.add_positional_optional("directory", "directory_path", "output directory", fs.cwd())
config = parse_arguments("plot_mappings", definition)

# -----------------------------------------------------------------

# Create temp path
temp_path = fs.create_directory_in(config.directory, "seds")

# Plot
plot_mappings_examples(config.directory, temp_path=temp_path)

# -----------------------------------------------------------------
# -----------------------------------------------------------------

# Create the configuration definition
definition = ConfigurationDefinition()

# Add setting
definition.add_required("step", str, "the modeling step for which to clear the output")

# Get the configuration
reader = ConfigurationReader("clear")
config = reader.read(definition)

# -----------------------------------------------------------------

# Determine the log file path
logfile_path = fs.join(fs.cwd(), "log", time.unique_name("log") + ".txt") if config.report else None

# Determine the log level
level = "DEBUG" if config.debug else "INFO"

# Initialize the logger
log = logging.setup_log(level=level, path=logfile_path)
log.start("Starting clear ...")

# -----------------------------------------------------------------

prep_path = fs.join(config.path, "prep")
components_path = fs.join(config.path, "components")
truncated_path = fs.join(config.path, "truncated")
phot_path = fs.join(config.path, "phot")
maps_path = fs.join(config.path, "maps")
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# **        PTS -- Python Toolkit for working with SKIRT         **
# **       © Astronomical Observatory, Ghent University          **
# *****************************************************************

# Import the relevant PTS classes and modules
from pts.core.basics.configuration import ConfigurationDefinition
from pts.core.tools import filesystem as fs
from pts.core.remote.host import find_host_ids

# -----------------------------------------------------------------

# Create the configuration
definition = ConfigurationDefinition()

# Add required arguments
definition.add_required("ski", "file_path", "name/path of the ski file")
definition.add_required("fski", "file_path", "name/path of the fski file")

# Input and output
definition.add_optional("input", "directory_path", "input directory for the simulation(s)", letter="i")
definition.add_optional("output", "directory_path", "output directory for the simulation(s)", fs.cwd(), letter="o", convert_default=True)

# Add positional arguments
definition.add_positional_optional("remote", "string", "the remote host on which to run the simulation (if none is specified, the simulation is run locally)", choices=find_host_ids())

# -----------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# **        PTS -- Python Toolkit for working with SKIRT         **
# **       © Astronomical Observatory, Ghent University          **
# *****************************************************************

# Import the relevant PTS classes and modules
from pts.modeling.fitting.component import get_generation_names, get_last_finished_generation
from pts.core.tools import filesystem as fs
from pts.modeling.config.component import definition

# -----------------------------------------------------------------

definition = definition.copy()

# Add settings
definition.add_positional_optional("features", "string_list", "features to be plotted")
definition.add_positional_optional("generation", "string", "generation for which to plot the features", choices=get_generation_names(fs.cwd()), default=get_last_finished_generation(fs.cwd()))

# -----------------------------------------------------------------
# -----------------------------------------------------------------

# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function

# Import the relevant PTS classes and modules
from pts.core.basics.configuration import ConfigurationDefinition, parse_arguments
from pts.core.tools import filesystem as fs
from pts.core.tools.serialization import load_dict
from pts.core.tools import sequences
from pts.core.tools import formatting as fmt

# -----------------------------------------------------------------

# Find methods file
methods_path = fs.join(fs.cwd(), "methods.txt")
if not fs.is_file(methods_path): raise IOError("Methods file not found")
methods = load_dict(methods_path)

# Find origins file
origins_path = fs.join(fs.cwd(), "origins.txt")
if not fs.is_file(origins_path): raise IOError("Origins file not found")
origins = load_dict(origins_path)

# -----------------------------------------------------------------

filenames = fs.files_in_path(fs.cwd(), returns="name", extension="fits")

# -----------------------------------------------------------------

map_names = sequences.union(methods.keys(), origins.keys(), filenames)
    #print(find_path)
    #print(remote.items_in_path(find_path, recursive=True))

    # Loop over the files
    paths = remote.files_in_path(find_path, contains=config.contains, not_contains=config.not_contains, extension=config.extension, recursive=config.recursive)
    if len(paths) == 0: log.warning("No files found")
    else:
        for path in paths:
            if config.full: print(path)
            else: print(path.split(find_path)[1])

# LOCALLY
else:

    # Determine path
    find_path = fs.cwd()

    # Loop over the files
    paths = fs.files_in_path(find_path, contains=config.contains, extension=config.extension, recursive=config.recursive)
    if len(paths) == 0: log.warning("No files found")
    else:
        for path in paths:
            if config.full: print(path)
            else: print(path.split(find_path)[1])

# -----------------------------------------------------------------
definition.add_optional("max_npixels", "positive_integer", "maximum number of pixels") definition.add_optional("downsample", "positive_real", "downsample with this factor") definition.add_flag("show", "show after creating", False) config = parse_arguments("fits_to_png", definition) # ----------------------------------------------------------------- # Inform the user log.info("Loading the FITS file ...") # Load the FITS file frame = Frame.from_file(config.filename) # ----------------------------------------------------------------- if config.output is not None: filepath = fs.absolute_or_in(config.output, fs.cwd()) else: # Determine the path name = fs.strip_extension(fs.name(config.filename)) filepath = fs.absolute_path(name + ".png") # ----------------------------------------------------------------- # Max npixels if config.max_npixels is not None: # Determine downsample factor if frame.xsize > config.max_npixels or frame.ysize > config.max_npixels: factor = max(frame.xsize, frame.ysize) / float(config.max_npixels)
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# **        PTS -- Python Toolkit for working with SKIRT         **
# **       © Astronomical Observatory, Ghent University          **
# *****************************************************************

# Import the relevant PTS classes and modules
from pts.modeling.plotting.data import get_features
from pts.modeling.config.plot import definition
from pts.core.tools import filesystem as fs

# -----------------------------------------------------------------

# Add settings
features = get_features(fs.cwd())
definition.pos_optional["features"].choices = features
definition.pos_optional["features"].default = features.keys()

# -----------------------------------------------------------------
if fs.is_file(memory_table_path): table = MemoryUsageTable.from_file(memory_table_path)

# If extracted memory information is not present, first perform the extraction
else:

    # Create a SkirtSimulation object based on a log file present in the current working directory
    simulation = createsimulations(single=True)

    # Create a new MemoryExtractor instance
    extractor = MemoryExtractor()

    # Run the extractor and get the memory table
    table = extractor.run(simulation)

# -----------------------------------------------------------------

if config.table and not fs.is_file(memory_table_path): table.saveto(memory_table_path)

# -----------------------------------------------------------------

# Determine the path to the plotting directory (the current working directory)
plot_path = fs.cwd()

# Create a MemoryPlotter instance
plotter = MemoryPlotter()

# Run the memory plotter
plotter.run(table, plot_path)

# -----------------------------------------------------------------
# -----------------------------------------------------------------

# If an input directory is given
if arguments.input is not None:

    # Determine the full path to the input directory
    input_path = fs.absolute(arguments.input)

    # Give an error if the input directory does not exist
    if not fs.is_directory(input_path): raise argparse.ArgumentError(input_path, "The input directory does not exist")

# If no input directory is given, assume the input is placed in the current working directory
else: input_path = fs.cwd()

# -----------------------------------------------------------------

# If an output directory is given
if arguments.output is not None:

    # Determine the full path to the output directory
    output_path = fs.absolute(arguments.output)

    # Create the directory if it does not yet exist
    if not fs.is_directory(output_path): fs.create_directory(output_path)

# If no output directory is given, place the output in the current working directory
else: output_path = fs.cwd()
parser.add_argument("--config", type=str, help="the name of a configuration file") # Visualisation parser.add_argument("--visualise", action="store_true", help="make visualisations") # Parse the command line arguments arguments = parser.parse_args() # ----------------------------------------------------------------- # Determine the full input and output paths if arguments.output is None: arguments.output = fs.cwd() if arguments.input is None: arguments.input = fs.cwd() arguments.input = fs.absolute(arguments.input) arguments.output = fs.absolute(arguments.output) # ----------------------------------------------------------------- # Determine the log file path logfile_path = fs.join(arguments.output, time.unique_name("log") + ".txt") if arguments.report else None # Determine the log level level = "DEBUG" if arguments.debug else "INFO" # Initialize the logger
ga.terminationCriteria.set(RawScoreCriteria)
ga.setMinimax(Consts.minimaxType["minimize"])
ga.setGenerations(5)
ga.setCrossoverRate(0.5)
ga.setPopulationSize(100)
ga.setMutationRate(0.5)

# Evolve
ga.evolve(freq_stats=1)

print("Final generation:", ga.currentGeneration)

# -----------------------------------------------------------------

# Determine the path to the reference directory
ref_path = fs.join(fs.cwd(), "original")
fs.create_directory(ref_path)

# -----------------------------------------------------------------

best = ga.bestIndividual()

best_parameter_a = best.genomeList[0]
best_parameter_b = best.genomeList[1]

best_path = fs.join(ref_path, "best.dat")

with open(best_path, "w") as best_file:
    best_file.write("Parameter a: " + str(best_parameter_a) + "\n")
    best_file.write("Parameter b: " + str(best_parameter_b) + "\n")
parser.add_argument("--debug", action="store_true", help="enable debug logging mode") parser.add_argument("--report", action='store_true', help="write a report file") parser.add_argument("--steps", action="store_true", help="write the results of intermediate steps") parser.add_argument("--config", type=str, help="the name of a configuration file") # Visualisation parser.add_argument("--visualise", action="store_true", help="make visualisations") # Parse the command line arguments arguments = parser.parse_args() # ----------------------------------------------------------------- # Determine the full input and output paths if arguments.output is None: arguments.output = fs.cwd() if arguments.input is None: arguments.input = fs.cwd() arguments.input = fs.absolute(arguments.input) arguments.output = fs.absolute(arguments.output) # ----------------------------------------------------------------- # Determine the log file path logfile_path = fs.join(arguments.output, time.unique_name("log") + ".txt") if arguments.report else None # Determine the log level level = "DEBUG" if arguments.debug else "INFO" # Initialize the logger log = logging.setup_log(level=level, path=logfile_path) log.start("Starting prepare_image ...")
    # Loop over the files
    paths = remote.files_in_path(find_path, contains=config.contains, not_contains=config.not_contains, extension=config.extension, recursive=config.recursive, exact_name=config.exact_name, exact_not_name=config.exact_not_name)
    nfiles = len(paths)

    if nfiles == 0: log.warning("No files found")
    else:

        log.info(str(nfiles) + " files found")

        for path in paths:

            if config.full: print(path)
            else: print(path.split(find_path)[1])
            if config.remove: remote.remove_file(path)

# LOCALLY
else:

    # Determine path
    find_path = fs.cwd()

    # Loop over the files
    paths = fs.files_in_path(find_path, contains=config.contains, extension=config.extension, recursive=config.recursive, exact_name=config.exact_name, exact_not_name=config.exact_not_name, directory_not_contains=config.directory_not_contains, directory_exact_not_name=config.directory_exact_not_name)
    nfiles = len(paths)

    if nfiles == 0: log.warning("No files found")
    else:

        log.info(str(nfiles) + " files found")

        for path in paths:

            if config.full: print(path)
            else: print(path.split(find_path)[1])
            if config.remove: fs.remove_file(path)

# -----------------------------------------------------------------
# -----------------------------------------------------------------

# Get simulations for each remote host
simulation_paths = dict()
for host_id in config.remotes:

    # Get the simulation paths
    paths_host = get_simulation_paths_for_host(host_id, as_dict=True)

    # Set the simulation paths
    simulation_paths[host_id] = paths_host

# -----------------------------------------------------------------

# Determine the output directory
output_path = config.output if config.output is not None else fs.cwd()

# -----------------------------------------------------------------

# Loop over the simulation names and look for matches
for simulation_name in config.names:

    the_host_id = None

    # Loop over the remotes, look for match
    for host_id in simulation_paths:
        if simulation_name in simulation_paths[host_id]:
            the_host_id = host_id
            break

    # Simulation file not found
default_relative_sigma_level = 1.0

# -----------------------------------------------------------------

scales = ["log", "sqrt"]
default_colour = "jet"
default_interval = "pts"

# -----------------------------------------------------------------

default_mask_color = "black"

# -----------------------------------------------------------------

# Set the modeling path
modeling_path = fs.cwd()

# Create the maps collection
collection = MapsCollection.from_modeling_path(modeling_path)

# -----------------------------------------------------------------

# Get maps
old_map_paths = collection.get_old_stellar_disk_map_paths()
young_map_paths = collection.get_young_map_paths(flatten=True)
ionizing_map_paths = collection.get_ionizing_map_paths(flatten=True)
dust_map_paths = collection.get_not_hot_dust_map_paths(flatten=True)

# Get map names
old_map_names = old_map_paths.keys()
young_map_names = young_map_paths.keys()
definition.add_required("remote_path", "string", "remote path of the file or directory to retrieve") definition.add_required("remote", "string", "remote host to retrieve from", choices=find_host_ids()) definition.add_optional("local_path", "string", "path of the local directory to store the file/directory") config = parse_arguments("retrieve", definition) # ----------------------------------------------------------------- # Create remote remote = Remote(host_id=config.remote) # ----------------------------------------------------------------- # Set full path of origin origin = remote.absolute_path(config.remote_path) # Set full path to the destination if config.local_path is not None: destination = fs.absolute_or_in_cwd(config.local_path) else: destination = fs.cwd() # ----------------------------------------------------------------- # Debugging log.debug("Origin: " + origin) log.debug("Destination: " + destination) # Upload remote.download(origin, destination) # -----------------------------------------------------------------
# -----------------------------------------------------------------

# Create the configuration definition
definition = ConfigurationDefinition()

# Add flags
definition.add_flag("table", "save the extracted progress table")

# Get configuration
config = parse_arguments("plotprogress", definition)

# -----------------------------------------------------------------

# Look for a file in the current working directory that contains extracted progress information
progress_table_path = fs.join(fs.cwd(), "progress.dat")
if fs.is_file(progress_table_path): table = ProgressTable.from_file(progress_table_path)

# If extracted progress information is not present, first perform the extraction
else: table = extract_progress_cwd()

# -----------------------------------------------------------------

if config.table and not fs.is_file(progress_table_path): table.saveto(progress_table_path)

# -----------------------------------------------------------------

# Determine the path to the plotting directory (the current working directory)
plot_path = fs.cwd()

# Create a ProgressPlotter instance
ga.setMinimax(constants.minimaxType["minimize"])
ga.setGenerations(5)
ga.setCrossoverRate(0.5)
ga.setPopulationSize(100)
ga.setMutationRate(0.5)

# Evolve
#ga.evolve(freq_stats=False)
ga.evolve()

print("Final generation:", ga.currentGeneration)

# -----------------------------------------------------------------

# Determine the path to the reference directory
ref_path = fs.join(fs.cwd(), "reference")
fs.create_directory(ref_path)

# -----------------------------------------------------------------

best = ga.bestIndividual()

best_parameter_a = best.genomeList[0]
best_parameter_b = best.genomeList[1]

best_path = fs.join(ref_path, "best.dat")

with open(best_path, "w") as best_file:
    best_file.write("Parameter a: " + str(best_parameter_a) + "\n")
    best_file.write("Parameter b: " + str(best_parameter_b) + "\n")
# Import the relevant PTS classes and modules
from pts.core.tools import filesystem as fs
from pts.core.tools import introspection
from pts.core.basics.configuration import ConfigurationDefinition, parse_arguments
from pts.modeling.component.component import load_modeling_history

# -----------------------------------------------------------------

# Create the configuration definition
definition = ConfigurationDefinition()

# Get configuration
config = parse_arguments("history", definition)

# -----------------------------------------------------------------

# Local table path
local_table_path = fs.join(introspection.pts_dat_dir("modeling"), "s4g", "s4g_p4_table8.dat")

# -----------------------------------------------------------------

# Get the history
history = load_modeling_history(fs.cwd())

# Loop over the entries
for i in range(len(history)): print(history["Command"][i], history["Start time"][i], history["End time"][i])

# -----------------------------------------------------------------
# -----------------------------------------------------------------

# Create the configuration
definition = ConfigurationDefinition()

# Add flags
definition.add_flag("table", "save the extracted timeline table")

# Get configuration
reader = ConfigurationReader("plottimeline")
config = reader.read(definition)

# -----------------------------------------------------------------

# Look for a file in the current working directory that contains extracted timeline information
timeline_table_path = fs.join(fs.cwd(), "timeline.dat")
if fs.is_file(timeline_table_path): table = TimeLineTable.from_file(timeline_table_path)

# If extracted timeline information is not present, first perform the extraction
else:

    # Create a SkirtSimulation object based on a log file present in the current working directory
    simulation = createsimulations(single=True)

    # Create a new TimeLineExtractor instance
    extractor = TimeLineExtractor()

    # Run the extractor and get the timeline table
    table = extractor.run(simulation)
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# **        PTS -- Python Toolkit for working with SKIRT         **
# **       © Astronomical Observatory, Ghent University          **
# *****************************************************************

## \package pts.do.magic.verify_fits Verify a batch of FITS files in the current working directory.

# -----------------------------------------------------------------

# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function

# Import astronomical modules
from astropy.io import fits

# Import the relevant PTS classes and modules
from pts.core.tools import filesystem as fs

# -----------------------------------------------------------------

# Loop over all files in the current path
for path in fs.files_in_path(fs.cwd(), extension="fits"):

    # Open the file
    hdulist = fits.open(path)

# -----------------------------------------------------------------
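# Hedged sketch (an assumption, not part of the original script): fits.open is
# lazy, so an explicit HDUList.verify call makes the check visible; the "warn"
# option reports problems as warnings instead of raising.
for path in fs.files_in_path(fs.cwd(), extension="fits"):

    with fits.open(path) as hdulist: hdulist.verify("warn")

# -----------------------------------------------------------------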
def run_configurable(table_matches, args, tables):

    """
    This function ...
    :param table_matches:
    :param args:
    :param tables:
    :return:
    """

    # Determine the configuration method
    configuration_method = None
    if args.interactive: configuration_method = "interactive"
    elif args.arguments: configuration_method = "arguments"
    elif args.configfile is not None: configuration_method = "file:" + args.configfile
    elif args.rerun: configuration_method = "last"

    # Resolve
    subproject, index = table_matches[0]
    resolved = introspection.resolve_from_match(subproject, tables[subproject], index)

    # Get properties
    title = resolved.title
    command_name = resolved.command_name
    hidden = resolved.hidden
    description = resolved.description
    module_path = resolved.module_path
    class_name = resolved.class_name
    configuration_method_table = resolved.configuration_method
    configuration_module_path = resolved.configuration_module_path
    subproject_path = introspection.pts_subproject_dir(subproject)

    # Set
    sys.argv[0] = fs.join(introspection.pts_root_dir, module_path.replace(".", "/") + ".py") # this is actually not necessary (and not really correct, it's not like we are calling the module where the class is..)
    del sys.argv[1] # but this is important

    # Get a list of the leftover arguments
    leftover_arguments = sys.argv[1:]

    # Welcome message
    if subproject == "modeling": welcome_modeling()
    elif subproject == "magic": welcome_magic()
    elif subproject == "dustpedia": welcome_dustpedia()
    elif subproject == "evolve": welcome_evolve()

    # Get the configuration definition
    definition = introspection.get_configuration_definition_pts_not_yet_in_pythonpath(configuration_module_path)

    # If not specified on the command line (before the command name), then use the default specified in the commands.dat file
    if configuration_method is None: configuration_method = configuration_method_table

    # Check whether arguments are passed and the configuration method is interactive
    if configuration_method == "interactive" and len(leftover_arguments) > 0: raise ValueError("Arguments on the command-line are not supported by default for this command. Run with pts --arguments to change this behaviour.")

    # Create the configuration
    config = create_configuration(definition, command_name, description, configuration_method)

    ## SAVE THE CONFIG if requested
    if config.write_config: config.saveto(config.config_file_path(command_name))

    # If this is not a re-run
    if not args.rerun:

        if not fs.is_directory(introspection.pts_user_config_dir): fs.create_directory(introspection.pts_user_config_dir)

        # CACHE THE CONFIG
        config_cache_path = fs.join(introspection.pts_user_config_dir, command_name + ".cfg")
        config.saveto(config_cache_path)

    # Setup function
    if subproject == "modeling": setup_modeling(command_name, fs.cwd())
    elif subproject == "magic": setup_magic(command_name, fs.cwd())
    elif subproject == "dustpedia": setup_dustpedia(command_name, fs.cwd())
    elif subproject == "evolve": setup_evolve(command_name, fs.cwd())

    # Initialize the logger
    log = initialize_pts(config, remote=args.remote, command_name=command_name)

    # Exact command name
    exact_command_name = subproject + "/" + command_name

    # If the PTS command has to be executed remotely
    if args.remote is not None: run_remotely(exact_command_name, config, args.keep, args.remote, log)

    # The PTS command has to be executed locally
    else: run_locally(exact_command_name, module_path, class_name, config, args.input_files, args.output_files, args.output, log)

    # Finish function
    if subproject == "modeling": finish_modeling(command_name, fs.cwd())
    elif subproject == "magic": finish_magic(command_name, fs.cwd())
    elif subproject == "dustpedia": finish_dustpedia(command_name, fs.cwd())
    elif subproject == "evolve": finish_evolve(command_name, fs.cwd())
level = "DEBUG" if config.debug else "INFO" # Initialize the logger log = logging.setup_log(level=level) log.start("Starting setup ...") # ----------------------------------------------------------------- # Inform the user log.info("Resolving the galaxy name ...") # Get the NGC name of the galaxy ngc_name = catalogs.get_ngc_name(config.name) # Inform the user log.info("Galaxy NGC ID is '" + ngc_name + "'") # Determine the path to the new directory path = fs.join(fs.cwd(), ngc_name) # Create the directory fs.create_directory(path) # Determine the path to the data directory data_path = fs.join(path, "data") # Create the data directory fs.create_directory(data_path) # -----------------------------------------------------------------
from pts.core.simulation.output import output_type_choices

# -----------------------------------------------------------------

# Create the configuration
definition = ConfigurationDefinition()

# Input and output
definition.add_optional("input", "directory_path", "input directory for the simulation(s)", letter="i")
definition.add_optional("output", "directory_path", "output directory for the simulation(s)", fs.cwd(), letter="o", convert_default=True)

# Various flags
definition.add_flag("relative", "treats the given input and output paths as being relative to the ski/fski file")
definition.add_flag("emulate", "emulate the simulation while limiting computation")

# Other
definition.add_flag("keep", "keep remote input and output")
definition.add_optional("retrieve_types", "string_list", "types of output files that have to be retrieved/retained (None means everything)", choices=output_type_choices)
from pts.magic.misc.dustpedia import DustPediaDatabase, get_account
from pts.core.basics.configuration import ConfigurationDefinition, ConfigurationReader

# -----------------------------------------------------------------

# Create the configuration
definition = ConfigurationDefinition()

# Get configuration
reader = ConfigurationReader("plot_galaxies")
config = reader.read(definition)

# -----------------------------------------------------------------

# Determine the log file path
logfile_path = fs.join(fs.cwd(), time.unique_name("log") + ".txt") if config.report else None

# Determine the log level
level = "DEBUG" if config.debug else "INFO"

# Initialize the logger
log = logging.setup_log(level=level, path=logfile_path)
log.start("Starting plot_galaxies ...")

# -----------------------------------------------------------------

# Local table path
local_table_path = fs.join(introspection.pts_dat_dir("modeling"), "s4g", "s4g_p4_table8.dat")

# -----------------------------------------------------------------
config = parse_arguments("scaling_plots", definition) # ----------------------------------------------------------------- # Set figsize if config.small: figsize = "8,6" figsize_timelines = "8,8" else: figsize = "12,9" figsize_timelines = "12,12" # ----------------------------------------------------------------- # Locate the scaling test suite directory suite_path = fs.join(fs.cwd(), config.suite_name) if not fs.is_directory(suite_path): raise ValueError("The directory '" + suite_path + "' does not exist") # ----------------------------------------------------------------- # Make directory for output output_path = fs.create_directory_in(fs.cwd(), time.unique_name("scaling_plots")) # Make subdirectories single_node_path = fs.create_directory_in(output_path, "Single-node comparison") multi_node_path = fs.create_directory_in(output_path, "Load balancing and multi-node scaling") communication_path = fs.create_directory_in(output_path, "Communication") hybridization_path = fs.create_directory_in(output_path, "Hybridization") photon_packages_path = fs.create_directory_in(output_path, "Increased number of photon packages") memory_path = fs.create_directory_in(output_path, "Memory scaling")