def __init__(self, name, config):
    self.deviceName = name
    ConfigParser.__init__(self)
    for attr_name in config:
        self[attr_name] = attr = Attribute(attr_name)
        try:
            attr.setGetURL(config[attr_name]['get']['URL'])
            attr.setGetParam(config[attr_name]['get']['attribute'])
            attr.setGetScript(config[attr_name]['get']['script'])
        except KeyError:
            # No 'get' section defined for this attribute; mark all getters unavailable
            attr.setGetURL("NA")
            attr.setGetParam("NA")
            attr.setGetScript("NA")
        try:
            attr.setSetURL(config[attr_name]['set']['URL'])
            attr.setSetParam(config[attr_name]['set']['attribute'])
            attr.setSetScript(config[attr_name]['set']['script'])
        except KeyError:
            # No 'set' section defined for this attribute; mark all setters unavailable
            attr.setSetURL("NA")
            attr.setSetParam("NA")
            attr.setSetScript("NA")
        except TypeError:
            # Entry is not a nested mapping at all
            raise InvalidEntry
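# A hedged illustration of the nested mapping the Device constructor above
# expects. The key names ('get'/'set', 'URL', 'attribute', 'script') come
# straight from the lookups in __init__; the device/attribute names and all
# values below are made-up placeholders, not taken from the project.
example_device_config = {
    "temperature": {
        "get": {
            "URL": "http://device.local/status",   # hypothetical endpoint
            "attribute": "temp",                   # hypothetical parameter name
            "script": "read_temp.sh"               # hypothetical script
        }
        # No 'set' section: the constructor falls back to "NA" for all setters.
    }
}
# device = Device("thermostat", example_device_config)  # assumes Device is in scope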
def __init__(self, sample_data_json):
    # Parse and validate SampleSet config file
    sample_data_spec = "System/Datastore/SampleSet.validate"
    config_parser = ConfigParser(sample_data_json, sample_data_spec)
    self.config = config_parser.get_config()

    # Create Sample objects
    self.samples = self.__create_samples()

    # Check that sample-level metadata types are identical for every sample
    self.__check_samples()

    # Get types of data available
    self.__file_types = list(self.samples[0].get_paths().keys())
    self.__sample_data_types = list(self.samples[0].get_data().keys())
    self.__global_data_types = [x for x in list(self.config.keys()) if x != "sample"]

    # Sample order
    self.sample_names = [sample.name for sample in self.samples]

    # Organize global and sample-level metadata by data type
    self.data = self.__organize_data_by_type()
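# A minimal sketch (assumed, not from the project) of the structure SampleSet
# appears to consume: a "sample" list holding per-sample paths and metadata,
# plus top-level keys that become the global data types. Every field name
# below other than "sample" is hypothetical.
example_sample_set = {
    "sample": [
        {"name": "tumor_A",  "paths": {"R1": "/data/a_R1.fastq.gz"}, "is_tumor": True},
        {"name": "normal_A", "paths": {"R1": "/data/b_R1.fastq.gz"}, "is_tumor": False}
    ],
    "seq_platform": "Illumina"  # example of a global (non-"sample") data type
}
# sample_set = SampleSet("samples.json")  # the constructor takes a path to such a file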
def __init__(self, resource_config_file):
    # Parse and validate ResourceKit config file
    resource_config_spec = "System/Datastore/ResourceKit.validate"
    config_parser = ConfigParser(resource_config_file, resource_config_spec)
    self.config = config_parser.get_config()

    # Get list of path resources
    self.resources = self.__init_resource_files()
    self.resources = self.__organize_by_type()

    # Parse list of docker images
    self.dockers = self.__init_docker_images()
def __init__(self, name, platform_config_file, final_output_dir,
             config_spec=f"{CC_MAIN_DIR}/System/Platform/Platform.validate",
             generate_script=False):
    # Platform name
    self.name = name

    # Only generating script
    self.generate_script = generate_script

    # Initialize platform config
    config_parser = ConfigParser(platform_config_file, config_spec)
    self.config = config_parser.get_config()

    # Obtain the constants from the platform config
    self.NR_CPUS = {
        "TOTAL": self.config["PLAT_MAX_NR_CPUS"],
        "MAX": self.config["INST_MAX_NR_CPUS"],
        "MIN": self.config["INST_MIN_NR_CPUS"]
    }
    self.MEM = {
        "TOTAL": self.config["PLAT_MAX_MEM"],
        "MAX": self.config["INST_MAX_MEM"],
        "MIN": self.config["INST_MIN_MEM"]
    }
    self.DISK_SPACE = {
        "TOTAL": self.config["PLAT_MAX_DISK_SPACE"],
        "MAX": self.config["INST_MAX_DISK_SPACE"],
        "MIN": self.config["INST_MIN_DISK_SPACE"]
    }

    # Obtain the identity and the secret
    self.identity = self.config["identity"]
    self.secret = self.config.get("secret", None)

    # Obtain remaining parameters from the configuration file
    self.cmd_retries = self.config["cmd_retries"]

    # TODO: I still have to add this, because Datastore required a work directory
    self.wrk_dir = "/data"
    self.final_output_dir = self.standardize_dir(final_output_dir)

    # Save extra variables
    self.extra = self.config.get("extra", {})

    self.lockable = True
def __init__(self, pipeline_config_file):
    # Parse and validate pipeline config
    pipeline_config_spec = "System/Graph/Graph.validate"
    config_parser = ConfigParser(pipeline_config_file, pipeline_config_spec)
    self.config = config_parser.get_config()

    # Generate graph
    self.tasks, self.adj_list = self.__generate_graph()

    # Check validity of adjacency list
    self.__check_adjacency_list()

    # Check for cycles
    self.__check_cycles()
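# __check_cycles above is private to the class; as one plausible sketch (not
# the project's actual implementation), a cycle in a directed adjacency list
# can be detected with an iterative depth-first search over three node states.
# This assumes every child also appears as a key, which the adjacency-list
# validity check would guarantee.
def has_cycle(adj_list):
    """Return True if the directed graph in adj_list contains a cycle."""
    WHITE, GRAY, BLACK = 0, 1, 2               # unvisited / on DFS stack / done
    state = {node: WHITE for node in adj_list}
    for start in adj_list:
        if state[start] != WHITE:
            continue
        state[start] = GRAY
        stack = [(start, iter(adj_list[start]))]
        while stack:
            node, children = stack[-1]
            for child in children:
                if state[child] == GRAY:       # back edge => cycle
                    return True
                if state[child] == WHITE:
                    state[child] = GRAY
                    stack.append((child, iter(adj_list[child])))
                    break
            else:
                state[node] = BLACK            # all children explored
                stack.pop()
    return False

# has_cycle({"align": ["sort"], "sort": ["index"], "index": []})  # -> False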
def __init__(self, name, platform_config_file, final_output_dir):
    # Platform name
    self.name = name

    # Initialize platform config
    config_parser = ConfigParser(platform_config_file, self.CONFIG_SPEC)
    self.config = config_parser.get_config()

    # Platform-wide resource limits
    self.TOTAL_NR_CPUS = self.config["PLAT_MAX_NR_CPUS"]
    self.TOTAL_MEM = self.config["PLAT_MAX_MEM"]
    self.TOTAL_DISK_SPACE = self.config["PLAT_MAX_DISK_SPACE"]

    # Single process max resource limits
    self.MAX_NR_CPUS = self.config["PROC_MAX_NR_CPUS"]
    self.MAX_MEM = self.config["PROC_MAX_MEM"]
    self.MAX_DISK_SPACE = self.config["PROC_MAX_DISK_SPACE"]

    # Single process min resource limits
    self.MIN_NR_CPUS = 1
    self.MIN_MEM = 1
    self.MIN_DISK_SPACE = 1

    # Check to make sure resource limits are fine
    self.__check_resources()

    # Define workspace directory names
    self.wrk_dir = self.config["workspace_dir"]
    self.final_output_dir = self.standardize_dir(final_output_dir)

    # Dictionary to hold processors currently managed by the platform
    self.processors = {}

    # Platform resource threading lock
    self.platform_lock = threading.Lock()

    # Boolean flag to lock processor creation upon cleanup
    self.__locked = False

    # Current resource levels
    self.cpu = 0
    self.mem = 0
    self.disk_space = 0

    self.dealloc_procs = []
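# __check_resources above is private; a minimal sketch of the kind of sanity
# check it plausibly performs (assumed, not the project's actual code): each
# per-process maximum must fit within the platform total, and each minimum
# must not exceed the corresponding maximum.
def check_resource_limits(total, proc_max, proc_min, resource_name):
    if proc_max > total:
        raise ValueError(
            f"{resource_name}: per-process max ({proc_max}) exceeds platform total ({total})")
    if proc_min > proc_max:
        raise ValueError(
            f"{resource_name}: min ({proc_min}) exceeds per-process max ({proc_max})")

# e.g. check_resource_limits(self.TOTAL_NR_CPUS, self.MAX_NR_CPUS, self.MIN_NR_CPUS, "CPU")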
def __init__(self, configfile, configpath=None):
    config = con(configfile, configpath)
    ConfigParser.__init__(self)
    for nodename in config:
        self[nodename] = node = Node(nodename)
        try:
            node.setHost(config[nodename]['hostname'])
            node.setUsername(config[nodename]['username'])
            node.setPassword(config[nodename]['password'])
        except KeyError as ke:
            # A KeyError instance is not iterable in Python 3, so test the
            # missing key's name against the exception's string form instead
            missing_key = str(ke)
            if 'hostname' in missing_key:
                raise HostNameMissing
            elif 'username' in missing_key:
                raise UserNameMissing
            elif 'password' in missing_key:
                raise PasswordMissing
            else:
                # Re-raise the original KeyError, preserving its traceback
                raise
        except TypeError:
            # Entry is not a mapping of node settings
            raise InvalidEntry
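# A hedged example of the per-node mapping the constructor above walks. All
# three keys are required: a missing 'hostname', 'username', or 'password'
# raises HostNameMissing, UserNameMissing, or PasswordMissing respectively,
# and a non-mapping entry raises InvalidEntry. Names and values below are
# placeholders.
example_node_config = {
    "node01": {
        "hostname": "node01.example.com",
        "username": "admin",
        "password": "changeme"
    }
}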
def __init__(self, name, platform_config_file, final_output_dir):
    # Platform name
    self.name = name

    # Initialize platform config
    config_parser = ConfigParser(platform_config_file, self.CONFIG_SPEC)
    self.config = config_parser.get_config()

    # Only consider the specs related to the current platform
    self.config = self.config[self.__class__.__name__]

    # Obtain the constants from the platform config
    self.NR_CPUS = {
        "TOTAL": self.config["PLAT_MAX_NR_CPUS"],
        "MAX": self.config["INST_MAX_NR_CPUS"],
        "MIN": self.config["INST_MIN_NR_CPUS"]
    }
    self.MEM = {
        "TOTAL": self.config["PLAT_MAX_MEM"],
        "MAX": self.config["INST_MAX_MEM"],
        "MIN": self.config["INST_MIN_MEM"]
    }
    self.DISK_SPACE = {
        "TOTAL": self.config["PLAT_MAX_DISK_SPACE"],
        "MAX": self.config["INST_MAX_DISK_SPACE"],
        "MIN": self.config["INST_MIN_DISK_SPACE"]
    }

    # Obtain the identity and the secret
    self.identity = self.config["identity"]
    self.secret = self.config.get("secret", None)

    # Obtain processing locations
    self.region = self.config["region"]
    self.zone = self.config.get("zone", None)
    if self.zone is None:
        self.zone = self.get_random_zone()

    # Obtain remaining parameters from the configuration file
    self.cmd_retries = self.config["cmd_retries"]
    self.ssh_connection_user = self.config["ssh_connection_user"]

    # Obtain disk image name
    self.disk_image = self.config["disk_image"]
    self.disk_image_obj = None

    # TODO: I still have to add this, because Datastore required a work directory
    self.wrk_dir = "/data"
    self.final_output_dir = self.standardize_dir(final_output_dir)

    # Save extra variables
    self.extra = self.config.get("extra", {})

    # Dictionary to hold instances currently managed by the platform
    self.instances = {}

    # Platform resource threading lock
    self.platform_lock = threading.Lock()

    # Boolean flag to lock instance creation upon cleanup
    self.__locked = False

    # Records the source task of a pipeline failure, used to mark cancellation/failure
    self.pipeline_failure_source = ""

    # Current resource levels
    self.cpu = 0
    self.mem = 0
    self.disk_space = 0

    # TODO: figure out the ssh_connection_user from platform_config
    # Initialize the location of the CloudConductor ssh key
    self.ssh_private_key = None

    # Check if CloudInstance class is set by the user
    self.CloudInstanceClass = self.get_cloud_instance_class()
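# Because the constructor above narrows self.config to
# self.config[self.__class__.__name__], the platform config file is evidently
# keyed by the concrete platform class name. A hedged sketch of that shape:
# the keys inside the section are the ones the constructor reads, while the
# section name "GooglePlatform" and all values are placeholders.
example_platform_config = {
    "GooglePlatform": {                        # hypothetical subclass name
        "PLAT_MAX_NR_CPUS": 512, "INST_MAX_NR_CPUS": 64, "INST_MIN_NR_CPUS": 1,
        "PLAT_MAX_MEM": 2048, "INST_MAX_MEM": 256, "INST_MIN_MEM": 1,
        "PLAT_MAX_DISK_SPACE": 20000, "INST_MAX_DISK_SPACE": 2000, "INST_MIN_DISK_SPACE": 10,
        "identity": "service-account.json",
        "region": "us-east1",                  # zone omitted => get_random_zone()
        "cmd_retries": 3,
        "ssh_connection_user": "cloud",
        "disk_image": "cc-image-latest"
    }
}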
def __init__(self, configfile, configpath=None):
    # Load the raw configuration and register one Device entry per section
    config = con(configfile, configpath)
    ConfigParser.__init__(self)
    for devicename in config:
        self[devicename] = Device(devicename, config[devicename])