def __init__(self, name,
             # these parameters are inherited from the `LRMS` class
             architecture, max_cores, max_cores_per_job,
             max_memory_per_core, max_walltime, auth=None,
             **extra_args):
    """
    Initialize the resource: chain up to `LRMS.__init__` and reset
    all usage counters to an idle state.
    """
    # let the base class store the common resource attributes
    LRMS.__init__(self, name, architecture, max_cores, max_cores_per_job,
                  max_memory_per_core, max_walltime, auth, **extra_args)

    # task state transitions follow this graph
    self.transition_graph = NORMAL_TRANSITION_GRAPH

    # initial resource status: nothing queued or running;
    # `max_cores` bounds the number of concurrent processes,
    # and all memory is available
    self.queued = 0
    self.user_queued = 0
    self.user_run = 0
    self.free_slots = self.max_cores
    self.available_memory = self.max_cores * self.max_memory_per_core
def __init__(self, name,
             # this are inherited from the base LRMS class
             architecture, max_cores, max_cores_per_job,
             max_memory_per_core, max_walltime,
             auth,  # ignored if `transport` is 'local'
             # these are specific to the this backend
             frontend, transport, keyfile=None, accounting_delay=15,
             **extra_args):
    """
    Initialize the backend: chain up to `LRMS.__init__`, then set up
    the transport used to talk to the front-end node.

    :param frontend: host name of the cluster front-end node.
    :param transport: either ``'local'`` or ``'ssh'``.
    :param keyfile: SSH private key file (only used if `transport`
      is ``'ssh'``).
    :param accounting_delay: stored on the instance for later use.
    :raise gc3libs.exceptions.TransportError: if `transport` is
      neither ``'local'`` nor ``'ssh'``.
    """
    # init base class
    LRMS.__init__(self, name, architecture, max_cores, max_cores_per_job,
                  max_memory_per_core, max_walltime, auth, **extra_args)

    # backend-specific setup
    self.frontend = frontend
    # FIX: `accounting_delay` was accepted but silently discarded;
    # store it like the other backend variants do.
    self.accounting_delay = accounting_delay
    if transport == 'local':
        self.transport = gc3libs.backends.transport.LocalTransport()
        self._username = getuser()
    elif transport == 'ssh':
        # `auth` is only consulted for SSH connections
        auth = self._auth_fn()
        self._username = auth.username
        self.transport = gc3libs.backends.transport.SshTransport(
            frontend, username=self._username, keyfile=keyfile)
    else:
        raise gc3libs.exceptions.TransportError(
            "Unknown transport '%s'" % transport)
def __init__(self, name,
             # this are inherited from the base LRMS class
             architecture, max_cores, max_cores_per_job,
             max_memory_per_core, max_walltime,
             auth,  # ignored if `transport` is 'local'
             # these are specific to the this backend
             frontend, transport, accounting_delay=15,
             # SSH-related options; ignored if `transport` is 'local'
             ssh_config=None, keyfile=None, ignore_ssh_host_keys=False,
             ssh_timeout=None,
             **extra_args):
    """
    Initialize the backend: chain up to `LRMS.__init__`, then set up
    the transport used to talk to the front-end node.

    SSH-related parameters (`ssh_config`, `keyfile`,
    `ignore_ssh_host_keys`, `ssh_timeout`) default to the values
    carried by the `auth` object when not given explicitly; they are
    ignored when `transport` is ``'local'``.

    :raise gc3libs.exceptions.TransportError: if `transport` is
      neither ``'local'`` nor ``'ssh'``.
    """
    # init base class
    LRMS.__init__(self, name, architecture, max_cores, max_cores_per_job,
                  max_memory_per_core, max_walltime, auth, **extra_args)

    # backend-specific setup
    self.frontend = frontend
    # FIX: `accounting_delay` was accepted but silently discarded;
    # store it like the other backend variants do.
    self.accounting_delay = accounting_delay
    if transport == "local":
        self.transport = gc3libs.backends.transport.LocalTransport()
        self._username = getuser()
    elif transport == "ssh":
        auth = self._auth_fn()
        self._username = auth.username
        self.transport = gc3libs.backends.transport.SshTransport(
            frontend,
            ignore_ssh_host_keys=ignore_ssh_host_keys,
            # explicit arguments win over the `auth` defaults
            ssh_config=(ssh_config or auth.ssh_config),
            username=self._username,
            port=auth.port,
            keyfile=(keyfile or auth.keyfile),
            timeout=(ssh_timeout or auth.timeout),
        )
    else:
        raise gc3libs.exceptions.TransportError(
            "Unknown transport '%s'" % transport)
def __init__(self, name,
             # this are inherited from the base LRMS class
             architecture, max_cores, max_cores_per_job,
             max_memory_per_core, max_walltime,
             auth,  # ignored if `transport` is 'local'
             # these are specific to the this backend
             frontend, transport, accounting_delay=15,
             # SSH-related options; ignored if `transport` is 'local'
             ssh_config=None, keyfile=None, ignore_ssh_host_keys=False,
             ssh_timeout=None,
             large_file_threshold=None, large_file_chunk_size=None,
             spooldir=gc3libs.defaults.SPOOLDIR,
             **extra_args):
    """
    Initialize the backend: chain up to `LRMS.__init__`, then
    configure spool directory and transport to the front-end node.

    When `transport` is ``'ssh'``, unset SSH options fall back to the
    values carried by the `auth` object.

    :raise gc3libs.exceptions.TransportError: if `transport` is
      neither ``'local'`` nor ``'ssh'``.
    """
    # common resource attributes are handled by the base class
    LRMS.__init__(self, name, architecture, max_cores, max_cores_per_job,
                  max_memory_per_core, max_walltime, auth, **extra_args)

    # backend-specific state
    self.frontend = frontend
    self.spooldir = spooldir

    # build the transport object used to reach `frontend`
    if transport == 'local':
        self._username = getuser()
        self.transport = gc3libs.backends.transport.LocalTransport()
    elif transport == 'ssh':
        auth = self._auth_fn()
        self._username = auth.username
        self.transport = gc3libs.backends.transport.SshTransport(
            frontend,
            ignore_ssh_host_keys=ignore_ssh_host_keys,
            ssh_config=(ssh_config or auth.ssh_config),
            username=self._username,
            port=auth.port,
            keyfile=(keyfile or auth.keyfile),
            timeout=(ssh_timeout or auth.timeout),
            large_file_threshold=large_file_threshold,
            large_file_chunk_size=large_file_chunk_size,
        )
    else:
        raise gc3libs.exceptions.TransportError(
            "Unknown transport '%s'" % transport)

    self.accounting_delay = accounting_delay
def __init__(self, name,
             # this are inherited from the base LRMS class
             architecture, max_cores, max_cores_per_job,
             max_memory_per_core, max_walltime, auth,
             # these are specific to the ARC1 backend
             arc_ldap, frontend=None,
             lost_job_timeout=gc3libs.Default.ARC_LOST_JOB_TIMEOUT,
             **extra_args):
    """
    Initialize the (deprecated) ARC1 resource backend.

    :param arc_ldap: LDAP URL of the ARC information system; when
      `frontend` is not given, the front-end host name is extracted
      from this URL.
    :param frontend: optional explicit front-end host name.
    :raise gc3libs.exceptions.LRMSError: if the `arc` Python module
      could not be imported.
    :raise gc3libs.exceptions.ConfigurationError: if `arc_ldap`
      cannot be parsed as a URL.
    """
    log.warning(
        "The ARC1 backend (used in resource '%s') is deprecated"
        " and will be removed in a future release."
        " Consider changing your configuration.", name)

    # check if arc module has been imported
    if not have_arc_module:
        raise gc3libs.exceptions.LRMSError(
            "Could not import `arc` module, disable ARC1 resources.")

    # init base class
    LRMS.__init__(
        self, name,
        architecture, max_cores, max_cores_per_job,
        max_memory_per_core, max_walltime, auth)

    # ARC1-specific setup
    self.lost_job_timeout = lost_job_timeout
    self.arc_ldap = arc_ldap
    if frontend is None:
        if self.arc_ldap is not None:
            # extract frontend information from arc_ldap entry
            try:
                resource_url = gc3libs.url.Url(arc_ldap)
                self.frontend = resource_url.hostname
            # FIX: `except Exception, err` is Python-2-only syntax and
            # `err.message` was removed in Python 3; use the portable
            # `as err` form and `str(err)` instead.
            except Exception as err:
                raise gc3libs.exceptions.ConfigurationError(
                    "Configuration error: resource '%s' has no valid"
                    " 'arc_ldap' setting: %s: %s"
                    % (name, err.__class__.__name__, str(err)))
        else:
            self.frontend = None
    else:
        # FIX: an explicitly-given `frontend` was previously dropped,
        # leaving `self.frontend` unset.
        self.frontend = frontend
def __init__(self, name,
             # these parameters are inherited from the `LRMS` class
             architecture, max_cores, max_cores_per_job,
             max_memory_per_core, max_walltime, auth=None,
             **extra_args):
    """
    Initialize the resource.

    Delegates the common attributes to `LRMS.__init__` and starts
    from a completely idle state: no jobs queued or running, all
    cores free, all memory available.
    """
    LRMS.__init__(
        self, name,
        architecture, max_cores, max_cores_per_job,
        max_memory_per_core, max_walltime, auth, **extra_args)

    # jobs move through states according to this graph
    self.transition_graph = NORMAL_TRANSITION_GRAPH

    # initial resource status; `max_cores` is the cap on the number
    # of processes allowed to run concurrently
    self.free_slots = self.max_cores
    self.available_memory = self.max_cores * self.max_memory_per_core
    self.user_run = 0
    self.user_queued = 0
    self.queued = 0
def __init__(self, name,
             # these parameters are inherited from the `LRMS` class
             architecture, max_cores, max_cores_per_job,
             max_memory_per_core, max_walltime,
             # these are specific of the OpenStackLrms class
             keypair_name, public_key,
             os_region=None, image_id=None, image_name=None,
             os_auth_url=None, instance_type=None, auth=None,
             vm_pool_max_size=None, user_data=None,
             vm_os_overhead=gc3libs.Default.VM_OS_OVERHEAD,
             **extra_args):
    """
    Initialize the OpenStack cloud backend.

    Validates configuration (pool size, resource type, keypair name,
    image), reads OpenStack credentials from the `auth` object,
    creates the `novaclient` client and the persistent VM pool.

    :raise ConfigurationError: on invalid `vm_pool_max_size`,
      keypair name, or missing image specification.
    :raise gc3libs.exceptions.InvalidArgument: if no auth URL can be
      determined.
    """
    LRMS.__init__(self, name, architecture, max_cores, max_cores_per_job,
                  max_memory_per_core, max_walltime, auth, **extra_args)

    # start from an idle state
    self.free_slots = int(max_cores)
    self.user_run = 0
    self.user_queued = 0
    self.queued = 0
    self._flavors = []
    self.vm_pool_max_size = vm_pool_max_size
    if vm_pool_max_size is not None:
        try:
            self.vm_pool_max_size = int(self.vm_pool_max_size)
        except ValueError:
            raise ConfigurationError(
                "Value for `vm_pool_max_size` must be an integer,"
                " was %s instead." % vm_pool_max_size)

    # the part after '+' in the `type` config item names the
    # sub-resource kind (e.g. "openstack+shellcmd")
    self.subresource_type = self.type.split("+", 1)[1]
    if self.subresource_type not in available_subresource_types:
        raise UnrecoverableError("Invalid resource type: %s" % self.type)

    # Mapping of job.os_instance_id => LRMS
    self.subresources = {}

    auth = self._auth_fn()
    if os_auth_url is None:
        os_auth_url = os.getenv("OS_AUTH_URL")
    if os_auth_url is None:
        raise gc3libs.exceptions.InvalidArgument(
            "Cannot connect to the OpenStack API:"
            " No 'OS_AUTH_URL' environment variable defined,"
            # FIX: message wrongly said "EC2 backend" in the
            # OpenStack backend
            " and no 'os_auth_url' argument passed to the OpenStack backend.")
    self.os_auth_url = os_auth_url
    self.os_username = auth.os_username
    self.os_password = auth.os_password
    self.os_tenant_name = auth.os_project_name
    self.os_region_name = os_region
    if self.os_auth_url is None:
        raise gc3libs.exceptions.InvalidArgument(
            "Cannot connect to the OpenStack API:"
            " No 'os_auth_url' argument passed to the OpenStack backend.")

    # Keypair names can only contain alphanumeric chars!
    if not set(keypair_name).issubset(set(ascii_letters + digits + "_")):
        raise ConfigurationError(
            "Keypair name `%s` is invalid: keypair names can only contain "
            "alphanumeric chars: [a-zA-Z0-9_]" % keypair_name)
    self.keypair_name = keypair_name
    self.public_key = os.path.expanduser(
        os.path.expandvars(public_key.strip()))
    self.image_id = image_id
    self.image_name = image_name
    self.instance_type = instance_type
    self.user_data = user_data
    self.vm_os_overhead = gc3libs.quantity.Memory(vm_os_overhead)
    self._parse_security_group()
    self._conn = None

    # `self.subresource_args` is used to create subresources
    self.subresource_args = extra_args
    self.subresource_args["type"] = self.subresource_type
    self.subresource_args["architecture"] = self["architecture"]
    self.subresource_args["max_cores"] = self["max_cores"]
    self.subresource_args["max_cores_per_job"] = self["max_cores_per_job"]
    self.subresource_args["max_memory_per_core"] = self["max_memory_per_core"]
    self.subresource_args["max_walltime"] = self["max_walltime"]
    # ShellcmdLrms by default trusts the configuration, instead of
    # checking the real amount of memory and number of cpus, but
    # we need the real values instead.
    if self.subresource_type == gc3libs.Default.SHELLCMD_LRMS:
        self.subresource_args["override"] = "True"

    if not image_name and not image_id:
        raise ConfigurationError(
            "No `image_id` or `image_name` has been specified in the"
            " configuration file.")

    # helper for creating sub-resources
    self._cfgobj = gc3libs.config.Configuration(
        *gc3libs.Default.CONFIG_FILE_LOCATIONS,
        auto_enable_auth=True)

    # Only api version 1.1 are tested so far.
    self.compute_api_version = "1.1"

    # "Connect" to the cloud (connection is actually performed
    # only when needed by the `Client` class.
    self.client = NovaClient.Client(
        self.compute_api_version, self.os_username, self.os_password,
        self.os_tenant_name, self.os_auth_url,
        region_name=self.os_region_name)

    # Set up the VMPool persistent class. This has been delayed
    # until here because otherwise self._conn is None
    pooldir = os.path.join(os.path.expandvars(OpenStackLrms.RESOURCE_DIR),
                           "vmpool", self.name)
    self._vmpool = OpenStackVMPool(pooldir, self.client)
def __init__(self, name,
             # these parameters are inherited from the `LRMS` class
             architecture, max_cores, max_cores_per_job,
             max_memory_per_core, max_walltime, auth=None,
             # these are specific to `ShellcmdLrms`
             frontend='localhost', transport='local',
             time_cmd=None, override='False',
             spooldir=None, resourcedir=None,
             # SSH-related options; ignored if `transport` is 'local'
             ssh_config=None, keyfile=None, ignore_ssh_host_keys=False,
             ssh_timeout=None,
             **extra_args):
    """
    Initialize the shell-command backend.

    Chains up to `LRMS.__init__`, records the command-execution
    options, and builds the local or SSH transport used to run
    commands on `frontend`.

    :raise gc3libs.exceptions.TransportError: if `transport` is
      neither ``'local'`` nor ``'ssh'``.
    """
    # common resource attributes are handled by the base class
    LRMS.__init__(
        self, name,
        architecture, max_cores, max_cores_per_job,
        max_memory_per_core, max_walltime, auth, **extra_args)

    # GNU time is needed
    self.time_cmd = time_cmd

    # Spool dir resolution is deferred (default is $TMPDIR or
    # '/var/tmp', cf. `tempfile.mkftemp`) so that no use of
    # `transport` is required at construction time.
    self.spooldir = spooldir

    # The raw config value is kept here; `_gather_machine_specs()`
    # expands it later into the `resource_dir` property, so that a
    # remote connection is only opened when actually needed.
    self.cfg_resourcedir = resourcedir or ShellcmdLrms.RESOURCE_DIR

    # Configure transport
    if transport == 'local':
        self._username = getuser()
        self.transport = gc3libs.backends.transport.LocalTransport()
    elif transport == 'ssh':
        auth = self._auth_fn()
        self._username = auth.username
        # explicit keyword arguments take precedence over the
        # defaults carried by the `auth` object
        self.transport = gc3libs.backends.transport.SshTransport(
            frontend,
            ignore_ssh_host_keys=ignore_ssh_host_keys,
            ssh_config=(ssh_config or auth.ssh_config),
            username=self._username,
            port=auth.port,
            keyfile=(keyfile or auth.keyfile),
            timeout=(ssh_timeout or auth.timeout))
    else:
        raise gc3libs.exceptions.TransportError(
            "Unknown transport '%s'" % transport)
    self.frontend = frontend

    # use `max_cores` as the max number of processes to allow
    self.user_queued = 0
    self.queued = 0
    self.job_infos = {}
    self.total_memory = max_memory_per_core
    self.available_memory = self.total_memory
    self.override = gc3libs.utils.string_to_boolean(override)
def __init__(self, name,
             # these parameters are inherited from the `LRMS` class
             architecture, max_cores, max_cores_per_job,
             max_memory_per_core, max_walltime,
             # these are specific of the EC2Lrms class
             ec2_region, keypair_name, public_key,
             image_id=None, image_name=None, ec2_url=None,
             instance_type=None, auth=None,
             vm_pool_max_size=None, user_data=None,
             **extra_args):
    """
    Initialize the EC2 cloud backend.

    Validates configuration (pool size, resource type, keypair
    name, image), reads EC2 credentials from the `auth` object,
    and prepares the arguments used to build sub-resources.

    :raise ConfigurationError: on invalid `vm_pool_max_size` or
      keypair name, or when neither `image_id` nor `image_name`
      is given.
    :raise UnrecoverableError: on an unknown sub-resource type.
    :raise gc3libs.exceptions.InvalidArgument: when no EC2 URL can
      be determined.
    """
    LRMS.__init__(
        self, name,
        architecture, max_cores, max_cores_per_job,
        max_memory_per_core, max_walltime, auth, **extra_args)

    # start from an idle state
    self.free_slots = int(max_cores)
    self.user_run = 0
    self.user_queued = 0
    self.queued = 0

    # normalize `vm_pool_max_size` to an int (it may come from the
    # config file as a string)
    self.vm_pool_max_size = vm_pool_max_size
    if vm_pool_max_size is not None:
        try:
            self.vm_pool_max_size = int(self.vm_pool_max_size)
        except ValueError:
            raise ConfigurationError(
                "Value for `vm_pool_max_size` must be an integer,"
                " was %s instead." % vm_pool_max_size)

    # the part after '+' in the `type` config item names the kind
    # of sub-resource to run on each VM (e.g. "ec2+shellcmd")
    self.subresource_type = self.type.split('+', 1)[1]
    if self.subresource_type not in available_subresource_types:
        raise UnrecoverableError("Invalid resource type: %s" % self.type)
    self.region = ec2_region

    # Mapping of job.ec2_instance_id => LRMS
    self.subresources = {}

    # read credentials from the auth object
    auth = self._auth_fn()
    self.ec2_access_key = auth.ec2_access_key
    self.ec2_secret_key = auth.ec2_secret_key
    # fall back to the 'EC2_URL' environment variable
    if ec2_url is None:
        ec2_url = os.getenv('EC2_URL')
    if ec2_url is None:
        raise gc3libs.exceptions.InvalidArgument(
            "Cannot connect to the EC2 API:"
            " No 'EC2_URL' environment variable defined,"
            " and no 'ec2_url' argument passed to the EC2 backend.")
    self.ec2_url = gc3libs.url.Url(ec2_url)

    # Keypair names can only contain alphanumeric chars!
    # (`\W` matches any char outside [a-zA-Z0-9_])
    if re.match(r'.*\W.*', keypair_name):
        raise ConfigurationError(
            "Keypair name `%s` is invalid: keypair names can only contain "
            "alphanumeric chars: [a-zA-Z0-9_]" % keypair_name)
    self.keypair_name = keypair_name
    # expand `~` and environment variables in the public key path
    self.public_key = os.path.expanduser(
        os.path.expandvars(public_key.strip()))
    self.image_id = image_id
    self.image_name = image_name
    self.instance_type = instance_type
    self._instance_type_specs = {}
    self.user_data = user_data
    self._parse_security_group()
    # actual connection is established later
    self._conn = None

    # `self.subresource_args` is used to create subresources
    self.subresource_args = extra_args
    self.subresource_args['type'] = self.subresource_type
    self.subresource_args['architecture'] = self['architecture']
    self.subresource_args['max_cores'] = self['max_cores']
    self.subresource_args['max_cores_per_job'] = self['max_cores_per_job']
    self.subresource_args['max_memory_per_core'] = \
        self['max_memory_per_core']
    self.subresource_args['max_walltime'] = self['max_walltime']
    # ShellcmdLrms by default trusts the configuration, instead of
    # checking the real amount of memory and number of cpus, but
    # we need the real values instead.
    if self.subresource_type == gc3libs.Default.SHELLCMD_LRMS:
        self.subresource_args['override'] = 'True'

    if not image_name and not image_id:
        raise ConfigurationError(
            "No `image_id` or `image_name` has been specified in the"
            " configuration file.")

    # helper for creating sub-resources
    self._cfgobj = gc3libs.config.Configuration(
        *gc3libs.Default.CONFIG_FILE_LOCATIONS,
        auto_enable_auth=True)
def __init__(self, name,
             # these parameters are inherited from the `LRMS` class
             architecture, max_cores, max_cores_per_job,
             max_memory_per_core, max_walltime,
             # these are specific of the OpenStackLrms class
             keypair_name, public_key, vm_auth,
             os_region=None, image_id=None, os_auth_url=None,
             instance_type=None, auth=None,
             vm_pool_max_size=None, user_data=None,
             vm_os_overhead=gc3libs.Default.VM_OS_OVERHEAD,
             # extra args are used to instanciate "sub-resources"
             **extra_args):
    """
    Initialize the OpenStack cloud backend.

    Validates configuration, reads OpenStack credentials from the
    `auth` object, prepares the arguments used to build SSH-based
    sub-resources on spawned VMs, and creates the `novaclient`
    client and the persistent VM pool.

    :raise ConfigurationError: on invalid `vm_pool_max_size` or
      keypair name, or missing `image_id`.
    :raise UnrecoverableError: on an unknown sub-resource type.
    :raise gc3libs.exceptions.InvalidArgument: if no auth URL can
      be determined.
    """
    LRMS.__init__(
        self, name,
        architecture, max_cores, max_cores_per_job,
        max_memory_per_core, max_walltime, auth, **extra_args)

    # start from an idle state
    self.free_slots = int(max_cores)
    self.user_run = 0
    self.user_queued = 0
    self.queued = 0
    self._flavors = []
    self.vm_pool_max_size = vm_pool_max_size
    if vm_pool_max_size is not None:
        try:
            self.vm_pool_max_size = int(self.vm_pool_max_size)
        except ValueError:
            raise ConfigurationError(
                "Value for `vm_pool_max_size` must be an integer,"
                " was %s instead." % vm_pool_max_size)

    # the part after '+' in the `type` config item names the
    # sub-resource kind (e.g. "openstack+shellcmd")
    self.subresource_type = self.type.split('+', 1)[1]
    if self.subresource_type not in available_subresource_types:
        raise UnrecoverableError("Invalid resource type: %s" % self.type)

    # Mapping of job.execution.instance_id => LRMS
    self.subresources = {}

    auth = self._auth_fn()
    if os_auth_url is None:
        os_auth_url = os.getenv('OS_AUTH_URL')
    if os_auth_url is None:
        raise gc3libs.exceptions.InvalidArgument(
            "Cannot connect to the OpenStack API:"
            " No 'OS_AUTH_URL' environment variable defined,"
            " and no 'os_auth_url' argument passed"
            " to the OpenStack backend.")
    self.os_auth_url = os_auth_url
    self.os_username = auth.os_username
    self.os_password = auth.os_password
    self.os_tenant_name = auth.os_project_name
    self.os_region_name = os_region
    if self.os_auth_url is None:
        raise gc3libs.exceptions.InvalidArgument(
            "Cannot connect to the OpenStack API:"
            " No 'os_auth_url' argument passed to the OpenStack backend.")

    # Keypair names can only contain alphanumeric chars!
    if not set(keypair_name).issubset(set(ascii_letters + digits + '_')):
        raise ConfigurationError(
            "Keypair name `%s` is invalid: keypair names can only contain "
            "alphanumeric chars: [a-zA-Z0-9_]" % keypair_name)
    self.keypair_name = keypair_name
    self.public_key = os.path.expanduser(
        os.path.expandvars(public_key.strip()))
    self.image_id = image_id
    self.instance_type = instance_type
    self.user_data = user_data
    self.vm_os_overhead = gc3libs.quantity.Memory(vm_os_overhead)
    self._parse_security_group()
    self._conn = None

    # `*_instance_type` config items should be consumed here,
    # not in any sub-resource.
    # FIX: iterate over a snapshot -- popping from the dict while
    # iterating `.items()` raises `RuntimeError` on Python 3.
    for key, value in list(extra_args.items()):
        if key.endswith('_instance_type'):
            self[key] = value
            extra_args.pop(key)

    # `self.subresource_args` is used to create subresources
    self.subresource_args = extra_args
    self.subresource_args['type'] = self.subresource_type
    self.subresource_args['architecture'] = self['architecture']
    self.subresource_args['max_cores'] = self['max_cores']
    self.subresource_args['max_cores_per_job'] = self['max_cores_per_job']
    self.subresource_args['max_memory_per_core'] = \
        self['max_memory_per_core']
    self.subresource_args['max_walltime'] = self['max_walltime']
    # SSH-specific configuration
    self.subresource_args['transport'] = 'ssh'
    self.subresource_args['auth'] = vm_auth
    self.subresource_args['ssh_timeout'] = 7  # FIXME: hard-coded!
    self.subresource_args['ignore_ssh_host_keys'] = True
    self.subresource_args['keyfile'] = self.public_key
    # derive the private-key path by stripping a '.pub' suffix
    if self.subresource_args['keyfile'].endswith('.pub'):
        self.subresource_args['keyfile'] = \
            self.subresource_args['keyfile'][:-len('.pub')]
    # ShellcmdLrms by default trusts the configuration, instead of
    # checking the real amount of memory and number of cpus, but
    # we need the real values instead.
    if self.subresource_type == gc3libs.Default.SHELLCMD_LRMS:
        self.subresource_args['override'] = 'True'

    if image_id is None:
        raise ConfigurationError(
            "No `image_id` specified in the configuration file.")

    # Only API version 1.1 has been tested so far
    self.compute_api_version = '1.1'

    # "Connect" to the cloud (connection is actually performed
    # only when needed by the `Client` class.
    self.client = NovaClient.Client(
        self.compute_api_version, self.os_username, self.os_password,
        self.os_tenant_name, self.os_auth_url,
        region_name=self.os_region_name)

    # Set up the VMPool persistent class. This has been delayed
    # until here because otherwise self._conn is None
    pooldir = os.path.join(os.path.expandvars(OpenStackLrms.RESOURCE_DIR),
                           'vmpool', self.name)
    self._vmpool = OpenStackVMPool(pooldir, self.client)
def __init__(self, name,
             # these parameters are inherited from the `LRMS` class
             architecture, max_cores, max_cores_per_job,
             max_memory_per_core, max_walltime,
             # these are specific of the EC2Lrms class
             ec2_region, keypair_name, public_key, vm_auth,
             image_id=None, image_name=None, ec2_url=None,
             instance_type=None, auth=None,
             vm_pool_max_size=None, user_data=None,
             **extra_args):
    """
    Initialize the EC2 cloud backend.

    Validates configuration, reads EC2 credentials from the `auth`
    object, and prepares the arguments used to build SSH-based
    sub-resources on spawned VMs (authenticated via `vm_auth`).

    :raise ConfigurationError: on invalid `vm_pool_max_size` or
      keypair name, or when neither `image_id` nor `image_name`
      is given.
    :raise UnrecoverableError: on an unknown sub-resource type.
    :raise gc3libs.exceptions.InvalidArgument: when no EC2 URL can
      be determined.
    """
    LRMS.__init__(self, name, architecture, max_cores, max_cores_per_job,
                  max_memory_per_core, max_walltime, auth, **extra_args)

    # start from an idle state
    self.free_slots = int(max_cores)
    self.user_run = 0
    self.user_queued = 0
    self.queued = 0

    # normalize `vm_pool_max_size` to an int (it may come from the
    # config file as a string)
    self.vm_pool_max_size = vm_pool_max_size
    if vm_pool_max_size is not None:
        try:
            self.vm_pool_max_size = int(self.vm_pool_max_size)
        except ValueError:
            raise ConfigurationError(
                "Value for `vm_pool_max_size` must be an integer,"
                " was %s instead." % vm_pool_max_size)

    # the part after '+' in the `type` config item names the kind
    # of sub-resource to run on each VM (e.g. "ec2+shellcmd")
    self.subresource_type = self.type.split('+', 1)[1]
    if self.subresource_type not in available_subresource_types:
        raise UnrecoverableError("Invalid resource type: %s" % self.type)
    self.region = ec2_region

    # Mapping of job.execution._lrms_vm_id => LRMS
    self.subresources = {}

    # read credentials from the auth object
    auth = self._auth_fn()
    self.ec2_access_key = auth.ec2_access_key
    self.ec2_secret_key = auth.ec2_secret_key
    # fall back to the 'EC2_URL' environment variable
    if ec2_url is None:
        ec2_url = os.getenv('EC2_URL')
    if ec2_url is None:
        raise gc3libs.exceptions.InvalidArgument(
            "Cannot connect to the EC2 API:"
            " No 'EC2_URL' environment variable defined,"
            " and no 'ec2_url' argument passed to the EC2 backend.")
    self.ec2_url = gc3libs.url.Url(ec2_url)

    # Keypair names can only contain alphanumeric chars!
    # (`\W` matches any char outside [a-zA-Z0-9_])
    if re.match(r'.*\W.*', keypair_name):
        raise ConfigurationError(
            "Keypair name `%s` is invalid: keypair names can only contain "
            "alphanumeric chars: [a-zA-Z0-9_]" % keypair_name)
    self.keypair_name = keypair_name
    # expand `~` and environment variables in the public key path
    self.public_key = os.path.expanduser(
        os.path.expandvars(public_key.strip()))
    self.image_id = image_id
    self.image_name = image_name
    self.instance_type = instance_type
    self._instance_type_specs = {}
    self.user_data = user_data
    self._parse_security_group()
    # actual connection is established later
    self._conn = None

    # `self.subresource_args` is used to create subresources
    self.subresource_args = extra_args
    self.subresource_args['type'] = self.subresource_type
    self.subresource_args['architecture'] = self['architecture']
    self.subresource_args['max_cores'] = self['max_cores']
    self.subresource_args['max_cores_per_job'] = self['max_cores_per_job']
    self.subresource_args['max_memory_per_core'] = \
        self['max_memory_per_core']
    self.subresource_args['max_walltime'] = self['max_walltime']
    # SSH-specific configuration
    self.subresource_args['transport'] = 'ssh'
    self.subresource_args['auth'] = vm_auth
    self.subresource_args['ssh_timeout'] = 7  # FIXME: hard-coded!
    self.subresource_args['ignore_ssh_host_keys'] = True
    self.subresource_args['keyfile'] = self.public_key
    # derive the private-key path by stripping a '.pub' suffix
    if self.subresource_args['keyfile'].endswith('.pub'):
        self.subresource_args['keyfile'] = \
            self.subresource_args['keyfile'][:-len('.pub')]
    # ShellcmdLrms by default trusts the configuration, instead of
    # checking the real amount of memory and number of cpus, but
    # we need the real values instead.
    if self.subresource_type == gc3libs.Default.SHELLCMD_LRMS:
        self.subresource_args['override'] = 'True'

    if not image_name and not image_id:
        raise ConfigurationError(
            "No `image_id` or `image_name` has been specified in the"
            " configuration file.")
def __init__(self, name,
             # these parameters are inherited from the `LRMS` class
             architecture, max_cores, max_cores_per_job,
             max_memory_per_core, max_walltime,
             # these are specific of the OpenStackLrms class
             keypair_name, public_key, vm_auth,
             os_region=None, image_id=None, os_auth_url=None,
             instance_type=None, auth=None,
             vm_pool_max_size=None, user_data=None,
             vm_os_overhead=gc3libs.Default.VM_OS_OVERHEAD,
             # extra args are used to instanciate "sub-resources"
             **extra_args):
    """
    Initialize the OpenStack cloud backend.

    Validates configuration, reads OpenStack credentials from the
    `auth` object, prepares the arguments used to build SSH-based
    sub-resources on spawned VMs, then creates the API client and
    the persistent VM pool.

    :raise ConfigurationError: on invalid `vm_pool_max_size` or
      keypair name, or missing `image_id`.
    :raise UnrecoverableError: on an unknown sub-resource type.
    :raise gc3libs.exceptions.InvalidArgument: if no auth URL can
      be determined.
    """
    # Note: this creates attributes from key/value pairs given in the
    # `extra_args` parameters. In particular, the `self.type` attribute
    # (referenced below) is set in this chained constructor...
    LRMS.__init__(
        self, name,
        architecture, max_cores, max_cores_per_job,
        max_memory_per_core, max_walltime, auth, **extra_args)

    # start from an idle state
    self.free_slots = int(max_cores)
    self.user_run = 0
    self.user_queued = 0
    self.queued = 0
    self._flavors = []
    self.vm_pool_max_size = vm_pool_max_size
    if vm_pool_max_size is not None:
        try:
            self.vm_pool_max_size = int(self.vm_pool_max_size)
        except ValueError:
            raise ConfigurationError(
                "Value for `vm_pool_max_size` must be an integer,"
                " was %s instead." % vm_pool_max_size)

    # pylint: disable=no-member
    self.subresource_type = self.type.split('+', 1)[1]
    if self.subresource_type not in available_subresource_types:
        raise UnrecoverableError("Invalid resource type: %s" % self.type)

    # Mapping of job.execution.instance_id => LRMS
    self.subresources = {}

    auth = self._auth_fn()
    if os_auth_url is None:
        os_auth_url = os.getenv('OS_AUTH_URL')
    if os_auth_url is None:
        raise gc3libs.exceptions.InvalidArgument(
            "Cannot connect to the OpenStack API:"
            " No 'OS_AUTH_URL' environment variable defined,"
            " and no 'os_auth_url' argument passed"
            " to the OpenStack backend.")
    self.os_auth_url = os_auth_url
    self.os_username = auth.os_username
    self.os_password = auth.os_password
    self.os_tenant_name = auth.os_project_name
    self.os_region_name = os_region
    if self.os_auth_url is None:
        raise gc3libs.exceptions.InvalidArgument(
            "Cannot connect to the OpenStack API:"
            " No 'os_auth_url' argument passed to the OpenStack backend.")

    # Keypair names can only contain alphanumeric chars!
    if not set(keypair_name).issubset(set(ascii_letters + digits + '_')):
        raise ConfigurationError(
            "Keypair name `%s` is invalid: keypair names can only contain "
            "alphanumeric chars: [a-zA-Z0-9_]" % keypair_name)
    self.keypair_name = keypair_name
    self.public_key = os.path.expanduser(
        os.path.expandvars(public_key.strip()))
    self.image_id = image_id
    self.instance_type = instance_type
    self.user_data = user_data
    self.vm_os_overhead = gc3libs.quantity.Memory(vm_os_overhead)
    self._parse_security_group()
    self._conn = None

    # `*_instance_type` config items should be consumed here,
    # not in any sub-resource.
    # FIX: iterate over a snapshot -- popping from the dict while
    # iterating `.items()` raises `RuntimeError` on Python 3.
    for key, value in list(extra_args.items()):
        if key.endswith('_instance_type'):
            self[key] = value
            extra_args.pop(key)

    # `self.subresource_args` is used to create subresources
    self.subresource_args = extra_args
    self.subresource_args['type'] = self.subresource_type
    self.subresource_args['architecture'] = self['architecture']
    self.subresource_args['max_cores'] = self['max_cores']
    self.subresource_args['max_cores_per_job'] = self['max_cores_per_job']
    self.subresource_args['max_memory_per_core'] = \
        self['max_memory_per_core']
    self.subresource_args['max_walltime'] = self['max_walltime']
    # SSH-specific configuration
    self.subresource_args['transport'] = 'ssh'
    self.subresource_args['auth'] = vm_auth
    self.subresource_args['ssh_timeout'] = 7  # FIXME: hard-coded!
    self.subresource_args['ignore_ssh_host_keys'] = True
    self.subresource_args['keyfile'] = self.public_key
    # derive the private-key path by stripping a '.pub' suffix
    if self.subresource_args['keyfile'].endswith('.pub'):
        self.subresource_args['keyfile'] = \
            self.subresource_args['keyfile'][:-len('.pub')]
    # ShellcmdLrms by default trusts the configuration, instead of
    # checking the real amount of memory and number of cpus, but
    # we need the real values instead.
    if self.subresource_type == gc3libs.Default.SHELLCMD_LRMS:
        self.subresource_args['override'] = 'True'

    if image_id is None:
        raise ConfigurationError(
            "No `image_id` specified in the configuration file.")

    # "Connect" to the cloud (connection is actually performed
    # only when needed by the `Client` class.
    self.client = self._new_client()

    # Set up the VMPool persistent class. This has been delayed
    # until here because otherwise self._conn is None
    pooldir = os.path.join(os.path.expandvars(OpenStackLrms.RESOURCE_DIR),
                           'vmpool', self.name)
    self._vmpool = OpenStackVMPool(pooldir, self.client)
def __init__(self, name,
             # these parameters are inherited from the `LRMS` class
             architecture, max_cores, max_cores_per_job,
             max_memory_per_core, max_walltime, auth=None,
             # these are specific to `ShellcmdLrms`
             frontend='localhost', transport='local',
             time_cmd=None, override='False',
             spooldir=None, resourcedir=None,
             # SSH-related options; ignored if `transport` is 'local'
             ssh_config=None, keyfile=None, ignore_ssh_host_keys=False,
             ssh_timeout=None,
             **extra_args):
    """
    Initialize the shell-command backend: delegate common resource
    attributes to `LRMS.__init__`, then set up execution options
    and the transport (local or SSH) towards `frontend`.

    :raise gc3libs.exceptions.TransportError: if `transport` is
      neither ``'local'`` nor ``'ssh'``.
    """
    LRMS.__init__(
        self, name,
        architecture, max_cores, max_cores_per_job,
        max_memory_per_core, max_walltime, auth, **extra_args)

    # GNU time is needed
    self.time_cmd = time_cmd

    # Spool dir resolution is deferred to job submission time
    # (default: $TMPDIR or '/var/tmp', cf. `tempfile.mkftemp`), so
    # `transport` need not be touched here.
    self.spooldir = spooldir

    # Keep the raw config value; `_gather_machine_specs()` later
    # expands it into the `resource_dir` property so that any remote
    # connection is opened only when actually needed.
    self.cfg_resourcedir = resourcedir or ShellcmdLrms.RESOURCE_DIR

    # build the transport used to run commands
    if transport == 'local':
        self.transport = gc3libs.backends.transport.LocalTransport()
        self._username = getuser()
    elif transport == 'ssh':
        auth = self._auth_fn()
        self._username = auth.username
        # explicit arguments win over the `auth` defaults
        self.transport = gc3libs.backends.transport.SshTransport(
            frontend,
            ignore_ssh_host_keys=ignore_ssh_host_keys,
            ssh_config=(ssh_config or auth.ssh_config),
            username=self._username,
            port=auth.port,
            keyfile=(keyfile or auth.keyfile),
            timeout=(ssh_timeout or auth.timeout))
    else:
        raise gc3libs.exceptions.TransportError(
            "Unknown transport '%s'" % transport)

    self.frontend = frontend

    # use `max_cores` as the max number of processes to allow
    self.user_queued = 0
    self.queued = 0
    self.job_infos = {}
    self.total_memory = max_memory_per_core
    self.available_memory = self.total_memory
    self.override = gc3libs.utils.string_to_boolean(override)