def setup(self, **kwargs):

    """
    This function ...
    :param kwargs:
    :return:
    """

    # Call the setup function of the base class
    super(RemotesConfigurable, self).setup(**kwargs)

    # If remotes are passed
    if "remotes" in kwargs:

        # Get the remotes
        self.remotes = kwargs.pop("remotes")

        # Check if they are setup
        for remote in self.remotes:
            if not remote.connected: raise RuntimeError("Remotes must be connected")

    # Remotes are not passed
    else:

        # Gather host IDs
        if self.config.not_remotes is not None: host_ids = [host_id for host_id in self.config.host_ids if host_id not in self.config.not_remotes]
        else: host_ids = self.config.host_ids

        # Get the log_conda flag once, so it applies to every host
        log_conda = kwargs.pop("log_conda", False)

        # Loop over the different hosts
        for host_id in host_ids:

            # Setup the remote (login)
            remote = Remote(log_conda=log_conda)
            if not remote.setup(host_id, one_attempt=self.config.one_attempt):
                log.warning("Remote host '" + host_id + "' is down: skipping")
                continue

            # Add the remote to the list
            self.remotes.append(remote)
def detach(self):

    """
    This function ...
    :return:
    """

    # Inform the user
    log.info("Detaching from the screen session '" + self.screen_name + "' ...")

    # Check
    if not self.attached: log.warning("Not attached")

    # Set displayhook back to custom function
    self.remote.execute("sys.displayhook = my_display", expect=">>>")

    # Tmux
    if self.tmux:

        # Send Ctrl+b d to detach
        #self.remote.execute("^B")
        self.remote.execute("\x02") # CONTROL B
        self.remote.execute("d")

        # Match the prompt
        self.remote.ssh.prompt()

    # Screen
    else:

        # Send Ctrl+A d to detach
        #self.remote.ssh.send("^A")
        self.remote.ssh.send("\x01") # CONTROL A
        self.remote.ssh.send("d")

        # Match the prompt
        self.remote.ssh.prompt()

        #print("before", self.remote.ssh.before)
        #print("after", self.remote.ssh.after)

        # Extra empty line
        self.remote.ssh.send("")
        self.remote.ssh.prompt()
def setup(self, **kwargs):

    """
    This function ...
    :param kwargs:
    :return:
    """

    # Call the setup function of the base class
    super(RemotesConfigurable, self).setup(**kwargs)

    # If remotes are passed
    if "remotes" in kwargs:

        # Get the remotes
        self.remotes = kwargs.pop("remotes")

        # Check if they are setup
        for remote in self.remotes:
            if not remote.connected: raise RuntimeError("Remotes must be connected")

    # Remotes are not passed
    else:

        # Gather host IDs
        #if self.config.not_remotes is not None: host_ids = [host_id for host_id in self.config.host_ids if host_id not in self.config.not_remotes]
        #else: host_ids = self.config.host_ids

        # Get the log_conda flag once, so it applies to every host
        log_conda = kwargs.pop("log_conda", False)

        # Loop over the different hosts
        #for host_id in host_ids:
        for host in self.config.hosts:

            # Create the remote object
            remote = Remote(log_conda=log_conda)

            # Login to the remote
            #if not remote.setup(host_id, one_attempt=self.config.one_attempt, cluster_name=self.config.clustername):
            if not remote.setup(host, one_attempt=self.config.one_attempt):
                log.warning("Remote host '" + host.id + "' is down: skipping")
                continue

            # Add the remote to the list
            self.remotes.append(remote)
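# Hedged usage sketch (not part of the source): the setup() variants above accept either
# pre-connected remotes through the "remotes" keyword or host IDs taken from the
# configuration. The Remote import path matches the scripts further below; the host IDs
# and the 'component' instance (some RemotesConfigurable subclass) are assumptions made
# purely for illustration.
from pts.core.remote.remote import Remote

remotes = []
for host_id in ("hostA", "hostB"):  # hypothetical host IDs
    remote = Remote()
    if not remote.setup(host_id): continue  # skip hosts that are down, as setup() does above
    remotes.append(remote)

# Hand the already-connected remotes to the component instead of letting it log in itself
component.setup(remotes=remotes)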
def restore_differences(self):

    """
    This function ...
    :return:
    """

    # Inform the user
    log.info("Restoring the flux differences ...")

    # Loop over the generations
    for generation_name in self.generation_names:

        # Has generation?
        if not self.has_generation(generation_name): continue

        # Debugging
        log.debug("Restoring differences for generation '" + generation_name + "' ...")

        # Get the generation
        generation = self.generations[generation_name]

        # Get restore path
        generation_path = self.get_generation_restore_path(generation_name)

        # Loop over the simulations
        for simulation_name in generation.simulation_names:

            # Set simulation path
            simulation_path = fs.join(generation_path, simulation_name)

            # Check whether the simulation has differences
            filepath = fs.join(simulation_path, "differences.dat")
            if not fs.is_file(filepath):
                log.warning("No differences table for simulation '" + simulation_name + "' of generation '" + generation_name + "'")
                continue

            # Debugging
            log.debug("Restoring differences table for simulation '" + simulation_name + "' ...")

            # Copy the file
            fs.copy_file(filepath, generation.get_simulation_sed_differences_path(simulation_name))
def restore_fluxes_plots(self):

    """
    This function ...
    :return:
    """

    # Inform the user
    log.info("Restoring the mock fluxes plots ...")

    # Loop over the generations
    for generation_name in self.generation_names:

        # Has generation?
        if not self.has_generation(generation_name): continue

        # Debugging
        log.debug("Restoring plots for generation '" + generation_name + "' ...")

        # Get the generation
        generation = self.generations[generation_name]

        # Get restore path
        generation_path = self.get_generation_restore_path(generation_name)

        # Loop over the simulations
        for simulation_name in generation.simulation_names:

            # Set simulation path
            simulation_path = fs.join(generation_path, simulation_name)

            # Check whether fluxes are present
            filepath = fs.join(simulation_path, "earth_fluxes.pdf")
            if not fs.is_file(filepath):
                log.warning("No mock SED plot for simulation '" + simulation_name + "' of generation '" + generation_name + "'")
                continue

            # Debugging
            log.debug("Restoring fluxes plot for simulation '" + simulation_name + "' ...")

            # Copy the plot file
            fs.copy_file(filepath, generation.get_mock_sed_plot_path(simulation_name))
def import_package(self, name, as_name=None, from_name=None, show_output=False):

    """
    This function ...
    :param name:
    :param as_name:
    :param from_name:
    :param show_output:
    :return:
    """

    command = ""
    if from_name is not None: command += "from " + from_name + " "
    command += "import " + name
    if as_name is not None: command += " as " + as_name

    # Execute the import command
    #output = self.send_line(command, output=True, show_output=log.is_debug())
    output = self.send_line(command, show_output=show_output)

    # If output is given, this is normally not so good
    if len(output) > 0:

        # Check output
        last_line = output[-1]
        if "cannot import" in last_line: log.warning(last_line)
        if "ImportError" in last_line: log.warning(last_line)
        return False

    return True
# Loop over the names
for prep_name in environment.preparation_names:

    # Load the statistics
    statistics = load_statistics(modeling_path, prep_name)

    # Determine filter
    fltr = parse_filter(prep_name)

    # Get the extinction
    att = attenuation.extinction_for_filter(fltr)
    if att == 0.0:
        if statistics.attenuation != 0.0: log.warning(prep_name + ": attenuation is zero but preparation attenuation value was " + str(statistics.attenuation))
        continue

    # Ratio
    ratio = statistics.attenuation / att
    rel = abs((statistics.attenuation - att) / att)

    #print(prep_name, statistics.attenuation, att, ratio, rel * 100)
    print(prep_name)
    print("")
    print(" - preparation: " + str(statistics.attenuation))
    print(" - real: " + str(att))
    print(" - ratio: " + str(ratio))
    print(" - rel difference: " + str(rel * 100) + "%")
local_version = None

# Check presence and version remotely
if dependency in remote_packages:
    remotely_present = True
    remote_version = remote_packages[dependency]
else:
    # Check again for presence by importing
    remotely_present = remote_python.is_present_package(dependency)
    remote_version = None

# If present both locally and remotely
if locally_present and remotely_present:

    if config.versions:

        if local_version is None and remote_version is not None: log.warning(dependency + ": local version unknown")
        elif remote_version is None and local_version is not None: log.warning(dependency + ": remote version unknown")
        elif remote_version is None and local_version is None: log.warning(dependency + ": local and remote version unknown")
        elif local_version == remote_version: log.success(dependency + ": OK")
        else: log.warning(dependency + ": version " + local_version + " locally and version " + remote_version + " remotely")

    else: log.success(dependency + ": OK")

# Not present on at least one system
elif remotely_present and not locally_present: log.error(dependency + ": not present on this system")
elif locally_present and not remotely_present: log.error(dependency + ": not present on remote '" + config.remote + "'")
else: log.error(dependency + ": not present on either this system or remote '" + config.remote + "'")

# -----------------------------------------------------------------
# No maps
if len(maps) == 0:
    log.error("No " + which_map + " maps (yet)")
    continue

# Loop over the maps
for name in maps:

    # Get the map
    comparison_map = maps[name]

    #print(comparison_map.wcs.is_celestial)
    #print(comparison_map.wcs.has_celestial)

    if not comparison_map.wcs.is_celestial:
        log.warning("The " + name + " " + which_map + " dust map doesn't have a celestial WCS: skipping ...")
        continue

    # Debugging
    log.debug("Bringing the reference and comparison image to the same resolution ...")

    # Rebin to same pixel grid
    frames = NamedFrameList(reference=the_map, comparison=comparison_map)
    #frames.set_uniform_properties()
    #frames.show_coordinate_systems()
    frames.convolve_and_rebin()
def import_package(self, name, as_name=None, from_name=None, show_output=False, return_false_if_fail=True, return_failed_module=False):

    """
    This function ...
    :param name:
    :param as_name:
    :param from_name:
    :param show_output:
    :param return_false_if_fail:
    :param return_failed_module:
    :return:
    """

    command = ""
    if from_name is not None: command += "from " + from_name + " "
    command += "import " + name
    if as_name is not None: command += " as " + as_name

    # Execute the import command
    #output = self.send_line(command, output=True, show_output=log.is_debug)
    output = self.send_line(command, show_output=show_output)

    # If output is given, this is normally not so good
    if len(output) > 0:

        # Check output
        last_line = output[-1]

        # Error
        if "cannot import" in last_line:
            #log.warning(last_line)
            which = last_line.split("cannot import")[1]
            message = "[" + self.host_id + "] Cannot import " + which
            if return_false_if_fail: log.warning(message)
            else: raise ImportError(message)

        # Error
        elif "ImportError" in last_line:
            #log.warning(last_line)
            message = last_line.split("ImportError")[1]
            message = "[" + self.host_id + "] " + message
            if return_false_if_fail: log.warning(message)
            else: raise ImportError(message)

        # What else?
        else:

            # Show output
            for line in output: log.warning(line)

            # Probably OKAY, RETURN SUCCESS
            if return_failed_module: return True, None, None
            else: return True

        # NOT SUCCESS

        # Read the traceback
        module_name = None
        import_statement = None
        for line in reversed(output[:-1]):
            line = line.strip()
            if line.startswith("import"):
                module_name = line.split("import ")[1].split(" ")[0]
                import_statement = line
                break
            elif line.startswith("from") and "import" in line:
                module_name = line.split("from ")[1].split(" ")[0]
                import_statement = line
                break

        # Get base module name
        if module_name is not None: base_module_name = module_name.split(".")[0]
        else: base_module_name = None

        # Give command
        if base_module_name: import_statement = command

        # Import failed
        if return_failed_module: return False, base_module_name, import_statement
        else: return False

    # Import was successful
    if return_failed_module: return True, None, None
    else: return True
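# Hedged usage sketch (not part of the source): calling import_package() as defined above
# on a remote Python session. 'session' is a hypothetical, already-connected session object
# that exposes this method; the package names are arbitrary examples, and 'log' is the PTS
# logger imported elsewhere in these scripts.
ok = session.import_package("numpy", as_name="np")
if not ok: log.warning("numpy could not be imported on the remote")

# With return_failed_module=True, the base module name and the failing import statement are
# returned as well, so the caller can report exactly which remote dependency is missing
success, module_name, statement = session.import_package("constants", from_name="astropy", return_failed_module=True)
if not success: log.warning("Missing remote dependency: " + str(module_name) + " (" + str(statement) + ")")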
# Parse the arguments into a configuration
config = parse_arguments("clear_tasks", definition, description="Clear PTS tasks for a certain remote host")

# -----------------------------------------------------------------

# Loop over the remote hosts
for host_id in config.remotes:

    # Check whether the remote is available
    if config.full:
        remote = Remote()
        if not remote.setup(host_id):
            log.warning("The remote host '" + host_id + "' is not available: skipping ...")
            continue
    else: remote = None

    # Determine the path to the run directory for the specified remote host
    host_run_path = fs.join(introspection.skirt_run_dir, host_id)

    # Check if there are simulations
    if not fs.is_directory(host_run_path):
        log.debug("No run directory for host '" + host_id + "'")
        continue
    if fs.is_empty(host_run_path):
        log.debug("No simulations for host '" + host_id + "'")

    # Loop over the simulation files in the run directory
# -----------------------------------------------------------------

# Parse the arguments into a configuration
config = parse_arguments("clear_tasks", definition, description="Clear PTS tasks for a certain remote host")

# -----------------------------------------------------------------

# Loop over the remote hosts
for host_id in config.remotes:

    # Check whether the remote is available
    if config.full:
        remote = Remote()
        if not remote.setup(host_id):
            log.warning("The remote host '" + host_id + "' is not available: skipping ...")
            continue
    else: remote = None

    # Determine the path to the run directory for the specified remote host
    host_run_path = fs.join(introspection.skirt_run_dir, host_id)

    # Check if there are simulations
    if not fs.is_directory(host_run_path):
        log.debug("No run directory for host '" + host_id + "'")
        continue
    if fs.is_empty(host_run_path):
        log.debug("No simulations for host '" + host_id + "'")

    # Loop over the simulation files in the run directory
    for path, name in fs.files_in_path(host_run_path, extension="sim", returns=["path", "name"], sort=int):
    # Create remote
    remote = Remote(host_id=config.remote)

    # Determine path
    if config.remote_path is not None: find_path = remote.absolute_or_in_home(config.remote_path)
    else: find_path = remote.home_directory

    #print(find_path)
    #print(remote.items_in_path(find_path, recursive=True))

    # Loop over the files
    paths = remote.files_in_path(find_path, contains=config.contains, not_contains=config.not_contains, extension=config.extension, recursive=config.recursive, exact_name=config.exact_name, exact_not_name=config.exact_not_name)
    nfiles = len(paths)
    if nfiles == 0: log.warning("No files found")
    else:
        log.info(str(nfiles) + " files found")
        for path in paths:
            if config.full: print(path)
            else: print(path.split(find_path)[1])
            if config.remove: remote.remove_file(path)

# LOCALLY
else:

    # Determine path
    find_path = fs.cwd()

    # Loop over the files
    paths = fs.files_in_path(find_path, contains=config.contains, extension=config.extension, recursive=config.recursive,
definition.add_flag("qt", "also remove Qt installation") definition.add_flag("one_attempt", "only perform one attempt at connecting to a remote") # Get the config config = parse_arguments("deinstall_all", definition) # ----------------------------------------------------------------- # Loop over the remote hosts for host_id in find_host_ids(): # Setup remote = Remote() if not remote.setup(host_id, one_attempt=config.one_attempt): log.warning("Remote host '" + host_id + "' is offline") continue # Create uninstaller uninstaller = Uninstaller() # Set options uninstaller.config.skirt_and_or_pts = config.skirt_and_or_pts uninstaller.config.conda = config.conda uninstaller.config.qt = config.qt # Inform the user log.info("Running the uninstaller for remote host '" + host_id + "' ...") # Uninstall uninstaller.run(remote=remote)
    # Determine origin
    origin = instrument_to_origin(name.split("_")[1])

    # Determine local directory for this image
    origin_path = fs.join(environment.data_images_path, origin)
    if not fs.is_directory(origin_path): fs.create_directory(origin_path)

    # Determine local path
    local_path = fs.join(origin_path, name)

    #print("local_path")

    # Check whether the image is not present
    if fs.is_file(local_path):
        log.warning("The '" + name + "' remotely cached image is still present locally. Keeping this file and throwing the remote file away.")
        continue

    else:

        # Inform
        log.info("Downloading the '" + name + "' file to [" + origin_path + "] ...")

        # Download
        remote.download_file_to(path, origin_path, remove=True)

        # Success
        log.success("Successfully retrieved the '" + name + "' image ...")

# Inform the user
log.info("Clearing the remote data structure ...")
# Import the relevant PTS classes and modules
from pts.core.basics.configuration import ConfigurationDefinition, parse_arguments
from pts.core.remote.host import find_host_ids
from pts.core.remote.remote import Remote
from pts.core.basics.log import log

# -----------------------------------------------------------------

# Create definition
definition = ConfigurationDefinition()
definition.add_positional_optional("remotes", "string_list", "remote hosts on which to clear", choices=find_host_ids(), default=find_host_ids())

# Create setter
config = parse_arguments("clear_sessions_and_temp", definition)

# -----------------------------------------------------------------

# Loop over the remote hosts
for host_id in config.remotes:

    # Setup the remote
    remote = Remote()
    if not remote.setup(host_id):
        log.warning("Could not connect to remote host '" + host_id + "'")
        continue

    # Clear temporary directory and clear sessions
    remote.clear_temp_and_sessions()

# -----------------------------------------------------------------
definition.add_flag("conda", "also remove conda installation") definition.add_flag("qt", "also remove Qt installation") definition.add_flag("one_attempt", "only perform one attempt at connecting to a remote") # Get the config config = parse_arguments("deinstall_all", definition) # ----------------------------------------------------------------- # Loop over the remote hosts for host_id in find_host_ids(): # Setup remote = Remote() if not remote.setup(host_id, one_attempt=config.one_attempt): log.warning("Remote host '" + host_id + "' is offline") continue # Create uninstaller uninstaller = Uninstaller() # Set options uninstaller.config.skirt_and_or_pts = config.skirt_and_or_pts uninstaller.config.conda = config.conda uninstaller.config.qt = config.qt # Inform the user log.info("Running the uninstaller for remote host '" + host_id + "' ...") # Uninstall uninstaller.run(remote=remote)
for name in tests_for_subproject(subproject):

    # Determine the test path
    test_path = path_for_test(subproject, name)

    # Find file with name test.py
    filepath = fs.join(test_path, "test.py")

    # Load the test module
    test_module = imp.load_source(name, filepath)

    # Get properties of the test module
    try: description = test_module.description
    except AttributeError: log.warning("Description not specified for test '" + name + "'")

    # Iterate over these:
    commands = test_module.commands

    # Loop over the commands
    for command in commands:

        the_command = command.command

        # Find match
        match = introspection.resolve_command_tables(command, tables)

        # Get the class name
        cls = introspection.get_class(match.module_path, match.class_name)
# Check presence and version remotely
if dependency in remote_packages:
    remotely_present = True
    remote_version = remote_packages[dependency]
else:
    # Check again for presence by importing
    remotely_present = remote_python.is_present_package(dependency)
    remote_version = None

# If present both locally and remotely
if locally_present and remotely_present:

    if config.versions:

        if local_version is None and remote_version is not None: log.warning(dependency + ": local version unknown")
        elif remote_version is None and local_version is not None: log.warning(dependency + ": remote version unknown")
        elif remote_version is None and local_version is None: log.warning(dependency + ": local and remote version unknown")
        elif local_version == remote_version: log.success(dependency + ": OK")
        else: log.warning(dependency + ": version " + local_version + " locally and version " + remote_version + " remotely")

    else: log.success(dependency + ": OK")

# Not present on at least one system
# Show which wavelengths are used to create filter frames
if len(used_wavelengths) > 0:

    print("")
    print(fmt.underlined + fmt.blue + "Used wavelengths and corresponding filter(s):" + fmt.reset)
    print("")

    for wavelength_micron in used_wavelengths:

        filters = used_wavelengths[wavelength_micron]
        filter_names = [str(f) for f in filters]
        nfilters = len(filter_names)

        if nfilters == 1: print(fmt.green + " - " + str(wavelength_micron) + " micron: " + filter_names[0] + fmt.reset)
        else: print(fmt.yellow + " - " + str(wavelength_micron) + " micron: " + fmt.bold + ", ".join(filter_names) + fmt.reset)

    print("")

# -----------------------------------------------------------------

# Wavelength grid for use with spectral convolution
if spectral_convolution: check_grid_convolution()

# No spectral convolution
else: check_grid_no_convolution()

# -----------------------------------------------------------------

if config.show and config.grid_path is not None: log.warning("Cannot show")
elif config.show:
    path = fitting_run.get_wavelength_grid_plot_path(config.name)
    fs.open_file(path)

# -----------------------------------------------------------------
    # Create remote
    remote = Remote(host_id=config.remote)

    # Determine path
    if config.remote_path is not None: find_path = remote.absolute_or_in_home(config.remote_path)
    else: find_path = remote.home_directory

    #print(find_path)
    #print(remote.items_in_path(find_path, recursive=True))

    # Loop over the files
    paths = remote.files_in_path(find_path, contains=config.contains, not_contains=config.not_contains, extension=config.extension, recursive=config.recursive)
    if len(paths) == 0: log.warning("No files found")
    else:
        for path in paths:
            if config.full: print(path)
            else: print(path.split(find_path)[1])

# LOCALLY
else:

    # Determine path
    find_path = fs.cwd()

    # Loop over the files
    paths = fs.files_in_path(find_path, contains=config.contains, extension=config.extension, recursive=config.recursive)
    if len(paths) == 0: log.warning("No files found")
from pts.core.basics.log import log

# -----------------------------------------------------------------

# Create definition
definition = ConfigurationDefinition()
definition.add_positional_optional("remotes", "string_list", "remote hosts on which to clear", choices=find_host_ids(), default=find_host_ids())

# Create setter
config = parse_arguments("clear_sessions_and_temp", definition)

# -----------------------------------------------------------------

# Loop over the remote hosts
for host_id in config.remotes:

    # Setup the remote
    remote = Remote()
    if not remote.setup(host_id):
        log.warning("Could not connect to remote host '" + host_id + "'")
        continue

    # Clear temporary directory and clear sessions
    remote.clear_temp_and_sessions()

# -----------------------------------------------------------------
# Get the test names for this subproject
for name in tests_for_subproject(subproject):

    # Determine the test path
    test_path = path_for_test(subproject, name)

    # Find file with name test.py
    filepath = fs.join(test_path, "test.py")

    # Load the test module
    test_module = imp.load_source(name, filepath)

    # Get properties of the test module
    try: description = test_module.description
    except AttributeError: log.warning("Description not specified for test '" + name + "'")

    # Iterate over these:
    commands = test_module.commands

    # Loop over the commands
    for command in commands:

        the_command = command.command

        # Find match
        match = introspection.resolve_command_tables(command, tables)

        # Get the class name
        cls = introspection.get_class(match.module_path, match.class_name)
# Load the preparation statistics
statistics = load_statistics(modeling_path, prep_name)

# Determine filter
fltr = parse_filter(prep_name)

# Get the extinction
att = attenuation.extinction_for_filter(fltr)

# Attenuation zero but still corrected for attenuation that is nonzero
if att == 0.0 and statistics.attenuation != 0.0:

    # Give warning and add
    log.warning(prep_name + ": attenuation is zero but preparation attenuation value was " + str(statistics.attenuation))
    fix[prep_name] = (att, statistics.attenuation)

# Attenuation nonzero but not corrected
if att != 0.0 and statistics.attenuation == 0.0:

    # Give warning and add
    log.warning(prep_name + ": attenuation is nonzero but not corrected")
    fix[prep_name] = (att, statistics.attenuation)

# Attenuations not equal
elif not numbers.is_close(att, statistics.attenuation):

    # Give warning and add
    log.warning(prep_name + ": attenuation of " + tostr(att) +