def __init__(self, app):
    super(ClouderaManagerService, self).__init__(app)
    self.svc_roles = [ServiceRole.CLOUDERA_MANAGER]
    self.name = ServiceRole.to_string(ServiceRole.CLOUDERA_MANAGER)
    self.dependencies = []
    self.remaining_start_attempts = NUM_START_ATTEMPTS
    self.db_pwd = misc.random_string_generator()
    # Indicate if the web server has been configured and started
    self.started = False
    self.cm_port = 7180
    # Default cluster configuration
    # TODO - read local cloud host name!
    # self.cm_host = socket.gethostname()
    self.cm_host = self.app.cloud_interface.get_local_hostname()
    self.host_list = []
    self.cluster_name = "Cluster 1"
    self.cdh_version = "CDH5"
    self.cdh_version_number = "5"
    self.cm_username = "******"
    self.cm_password = "******"
    self.mgmt_service_name = "ManagementService"
    self.host_username = "******"
    self.host_password = self.app.config.get('password')
    self.cm_repo_url = None
    self.service_types_and_names = {
        "HDFS": "HDFS",
        "YARN": "YARN"
    }

def __init__(self, app, instance_role='master'):
    super(PSSService, self).__init__(app)
    self.svc_roles = [ServiceRole.PSS]
    self.name = ServiceRole.to_string(ServiceRole.PSS)
    self.svc_type = ServiceType.CM_SERVICE
    self.instance_role = instance_role
    log.debug("Configured PSS as {0}".format(self.instance_role))

def __init__(self, app):
    super(PostgresService, self).__init__(app)
    self.name = ServiceRole.to_string(ServiceRole.GALAXY_POSTGRES)
    self.svc_roles = [ServiceRole.GALAXY_POSTGRES]
    self.psql_port = app.path_resolver.psql_db_port
    self.dependencies = [ServiceDependency(self, ServiceRole.GALAXY_DATA),
                         ServiceDependency(self, ServiceRole.MIGRATION)]

def __init__(self, app):
    super(ClouderaManagerService, self).__init__(app)
    self.svc_roles = [ServiceRole.CLOUDERA_MANAGER]
    self.name = ServiceRole.to_string(ServiceRole.CLOUDERA_MANAGER)
    self.dependencies = []
    self.db_pwd = misc.random_string_generator()
    # Indicate if the web server has been configured and started
    self.started = False
    self.cm_port = 7180
    # Default cluster configuration
    self.cm_host = socket.gethostname()
    self.host_list = [self.cm_host]
    self.cluster_name = "Cluster 1"
    self.cdh_version = "CDH5"
    self.cdh_version_number = "5"
    self.cm_username = "******"
    self.cm_password = "******"
    self.cm_service_name = "ManagementService"
    self.host_username = "******"
    # Read the password from the system!
    self.host_password = self.app.config.get('password')
    self.cm_repo_url = None
    self.service_types_and_names = {
        "HDFS": "HDFS",
        "YARN": "YARN",
        "ZOOKEEPER": "ZooKeeper"
    }

def __init__(self, app):
    super(HadoopService, self).__init__(app)
    self.svc_roles = [ServiceRole.HADOOP]
    self.name = ServiceRole.to_string(ServiceRole.HADOOP)
    self.dependencies = [ServiceDependency(self, ServiceRole.SGE)]
    self.id_rsa_path = os.path.join(paths.P_HADOOP_HOME, "id_rsa")
    self.id_rsa_pub_key_path = os.path.join(paths.P_HADOOP_HOME, "id_rsa.pub")

def __init__(self, app):
    super(HadoopService, self).__init__(app)
    self.svc_roles = [ServiceRole.HADOOP]
    self.name = ServiceRole.to_string(ServiceRole.HADOOP)
    self.dependencies = [ServiceDependency(self, ServiceRole.SGE)]
    self.id_rsa_path = os.path.join(paths.P_HADOOP_HOME, "id_rsa")
    self.id_rsa_pub_key_path = os.path.join(
        paths.P_HADOOP_HOME, "id_rsa.pub")

def __init__(self, app):
    super(SlurmdService, self).__init__(app)
    self.svc_roles = [ServiceRole.SLURMD]
    self.name = ServiceRole.to_string(ServiceRole.SLURMD)
    self.dependencies = [
        ServiceDependency(self, ServiceRole.SLURMCTLD),
    ]
    self.num_restarts = 0
    self.max_restarts = 3

def __init__(self, app):
    super(NodejsProxyService, self).__init__(app)
    self.svc_roles = [ServiceRole.NODEJSPROXY]
    self.name = ServiceRole.to_string(ServiceRole.NODEJSPROXY)
    self.dependencies = [ServiceDependency(self, ServiceRole.GALAXY_TOOLS),
                         ServiceDependency(self, ServiceRole.SUPERVISOR)]
    self.np_port = 8800
    self.supervisor_conf_dir = '/etc/supervisor/conf.d'
    self.supervisor_prog_name = 'galaxy_nodejs_proxy'

def __init__(self, app):
    super(CloudgeneService, self).__init__(app)
    self.svc_roles = [ServiceRole.CLOUDGENE]
    self.name = ServiceRole.to_string(ServiceRole.CLOUDGENE)
    self.dependencies = [ServiceDependency(self, ServiceRole.CLOUDERA_MANAGER)]
    self.port = 8085
    self.cg_url = "https://cloudman.s3.amazonaws.com/files/cloudgene-cloudman-daemon.tar.gz"
    self.cg_base_dir = '/mnt/galaxy/cloudgene/'
    self.cg_home = os.path.join(self.cg_base_dir, 'cloudgene-daemon')

def __init__(self, app):
    super(CloudgeneService, self).__init__(app)
    self.svc_roles = [ServiceRole.CLOUDGENE]
    self.name = ServiceRole.to_string(ServiceRole.CLOUDGENE)
    self.dependencies = [ServiceDependency(self, ServiceRole.CLOUDERA_MANAGER)]
    self.port = 8085
    self.cg_url = "http://cloudgene.uibk.ac.at/downloads/cloudgene-cloudman.tar.gz"
    self.cg_base_dir = '/mnt/galaxy/cloudgene/'
    self.cg_home = os.path.join(self.cg_base_dir, 'cloudgene-cloudman')

def __init__(self, app):
    super(PulsarService, self).__init__(app)
    self.pulsar_home = None
    self.pulsar_port = DEFAULT_PULSAR_PORT
    self.name = ServiceRole.to_string(ServiceRole.PULSAR)
    self.svc_roles = [ServiceRole.PULSAR]
    self.dependencies = [
        ServiceDependency(self, ServiceRole.JOB_MANAGER)
    ]

def __init__(self, app, as_min=-1, as_max=-1, instance_type=None):
    super(AutoscaleService, self).__init__(app)
    self.state = service_states.UNSTARTED
    self.svc_roles = [ServiceRole.AUTOSCALE]
    self.svc_type = ServiceType.CM_SERVICE
    self.name = ServiceRole.to_string(ServiceRole.AUTOSCALE)
    self.dependencies = [ServiceDependency(self, ServiceRole.MIGRATION)]
    self.as_max = as_max  # Max number of nodes autoscale should maintain
    self.as_min = as_min  # Min number of nodes autoscale should maintain
    self.instance_type = instance_type  # Type of instances to start

def __init__(self, app):
    """
    Initialize the service class by setting role and indicating dependencies.
    """
    super(ProFTPdService, self).__init__(app)
    log.debug("Initializing ProFTPdService")
    self.svc_roles = [ServiceRole.PROFTPD]
    self.name = ServiceRole.to_string(ServiceRole.PROFTPD)
    self.dependencies = [ServiceDependency(self, ServiceRole.GALAXY_POSTGRES),
                         ServiceDependency(self, ServiceRole.GALAXY_DATA)]

def __init__(self, app):
    super(GalaxyReportsService, self).__init__(app)
    self.galaxy_home = self.app.path_resolver.galaxy_home
    self.reports_port = DEFAULT_REPORTS_PORT
    self.name = ServiceRole.to_string(ServiceRole.GALAXY_REPORTS)
    self.svc_roles = [ServiceRole.GALAXY_REPORTS]
    # Hopefully the Galaxy dependency alone is enough to ensure the database
    # is migrated, etc...
    self.dependencies = [ServiceDependency(self, ServiceRole.GALAXY)]
    self.conf_dir = os.path.join(
        self.app.path_resolver.galaxy_home, 'reports.conf.d')

def __init__(self, app):
    super(LwrService, self).__init__(app)
    self.lwr_home = self.app.path_resolver.lwr_home
    self.lwr_port = DEFAULT_LWR_PORT
    self.name = ServiceRole.to_string(ServiceRole.LWR)
    self.svc_roles = [ServiceRole.LWR]
    self.dependencies = [
        ServiceDependency(self, ServiceRole.SGE),  # Well, someday anyway :)
        # Any way to make this depend on where LWR is installed?
        ServiceDependency(self, ServiceRole.GALAXY_TOOLS)
    ]

def __init__(self, app):
    super(SupervisorService, self).__init__(app)
    self.svc_roles = [ServiceRole.SUPERVISOR]
    self.name = ServiceRole.to_string(ServiceRole.SUPERVISOR)
    self.dependencies = []
    self.sv_port = 9002
    self.pid_file = '/var/run/supervisord.pid'
    self.main_conf_file = '/etc/supervisord.conf'
    self.conf_dir = '/etc/supervisor/conf.d'
    self.server = None
    self.log_file = '/var/log/supervisor/supervisord.log'

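# The constructors above all follow the same basic shape: call the base
# class, advertise the service's role(s) and display name, and list the
# roles it depends on. The sketch below illustrates that pattern in a
# self-contained form; ApplicationService, ServiceRole, ServiceDependency,
# and ExampleService here are simplified stand-ins for illustration, not
# CloudMan's actual classes.

class ServiceRole(object):
    # Hypothetical role constants used only by this sketch
    SUPERVISOR = 'Supervisor'
    EXAMPLE = 'Example'

    @staticmethod
    def to_string(role):
        return str(role)


class ServiceDependency(object):
    def __init__(self, owning_service, service_role):
        self.owning_service = owning_service
        self.service_role = service_role


class ApplicationService(object):
    def __init__(self, app):
        self.app = app


class ExampleService(ApplicationService):
    def __init__(self, app):
        super(ExampleService, self).__init__(app)
        # Role(s) this service fulfills and its human-readable name
        self.svc_roles = [ServiceRole.EXAMPLE]
        self.name = ServiceRole.to_string(ServiceRole.EXAMPLE)
        # Roles that must be available before this service can start
        self.dependencies = [ServiceDependency(self, ServiceRole.SUPERVISOR)]


if __name__ == '__main__':
    svc = ExampleService(app=None)
    print(svc.name, [d.service_role for d in svc.dependencies])
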
def __init__(self, filesystem, from_archive=None):
    """
    Instance's transient storage exposed over NFS.
    """
    super(TransientStorage, self).__init__(filesystem.app)
    self.fs = filesystem
    self.app = self.fs.app
    self.device = None
    self.from_archive = from_archive
    self.svc_roles = [ServiceRole.TRANSIENT_NFS]
    self.name = ServiceRole.to_string(ServiceRole.TRANSIENT_NFS)

def __init__(self, app):
    super(NodejsProxyService, self).__init__(app)
    self.svc_roles = [ServiceRole.NODEJSPROXY]
    self.name = ServiceRole.to_string(ServiceRole.NODEJSPROXY)
    self.dependencies = [
        ServiceDependency(self, ServiceRole.GALAXY_TOOLS),
        ServiceDependency(self, ServiceRole.SUPERVISOR)
    ]
    self.np_port = 8800
    self.supervisor_conf_dir = '/etc/supervisor/conf.d'
    self.supervisor_prog_name = 'galaxy_nodejs_proxy'

def __init__(self, app, instance_role='master'):
    super(PSSService, self).__init__(app)
    self.svc_roles = [ServiceRole.PSS]
    self.name = ServiceRole.to_string(ServiceRole.PSS)
    self.instance_role = instance_role
    log.debug("Configured PSS as {0}".format(self.instance_role))
    # Name of the default script to run
    self.pss_filename = 'post_start_script' if self.instance_role == 'master' \
        else 'worker_post_start_script'
    self.pss_url = self.app.ud.get('post_start_script_url', None) \
        if self.instance_role == 'master' \
        else self.app.ud.get('worker_post_start_script_url', None)

def __init__(self, app):
    super(PulsarService, self).__init__(app)
    self.name = ServiceRole.to_string(ServiceRole.PULSAR)
    self.svc_roles = [ServiceRole.PULSAR]
    self.dependencies = [
        ServiceDependency(self, ServiceRole.JOB_MANAGER),
        ServiceDependency(self, ServiceRole.SUPERVISOR)
    ]
    self.pulsar_home = '/mnt/pulsar'
    self.pulsar_port = 8913
    self.supervisor_conf_dir = '/etc/supervisor/conf.d'
    self.supervisor_prog_name = 'pulsar'

def __init__(self, app):
    """
    Initialize the service class by setting role and indicating dependencies.
    """
    super(ProFTPdService, self).__init__(app)
    log.debug("Initializing ProFTPdService")
    self.svc_roles = [ServiceRole.PROFTPD]
    self.name = ServiceRole.to_string(ServiceRole.PROFTPD)
    self.dependencies = [
        ServiceDependency(self, ServiceRole.GALAXY_POSTGRES),
        ServiceDependency(self, ServiceRole.GALAXY_DATA)
    ]

def __init__(self, app):
    super(GalaxyReportsService, self).__init__(app)
    self.galaxy_home = self.app.path_resolver.galaxy_home
    self.reports_port = DEFAULT_REPORTS_PORT
    self.name = ServiceRole.to_string(ServiceRole.GALAXY_REPORTS)
    self.svc_roles = [ServiceRole.GALAXY_REPORTS]
    # Hopefully the Galaxy dependency alone is enough to ensure the database
    # is migrated, etc...
    self.dependencies = [
        ServiceDependency(self, ServiceRole.GALAXY),
        ServiceDependency(self, ServiceRole.GALAXY_POSTGRES)
    ]
    self.conf_dir = os.path.join(self.app.path_resolver.galaxy_home,
                                 'reports.conf.d')

def __init__(self, app):
    super(NginxService, self).__init__(app)
    self.svc_roles = [ServiceRole.NGINX]
    self.name = ServiceRole.to_string(ServiceRole.NGINX)
    self.dependencies = []
    self.exe = self.app.path_resolver.nginx_executable
    self.conf_dir = self.app.path_resolver.nginx_conf_dir
    self.conf_file = self.app.path_resolver.nginx_conf_file  # Main conf file
    self.ssl_is_on = self.app.config.user_data.get('use_ssl', False)
    self.proxied_services = ['Galaxy', 'GalaxyReports', 'Pulsar',
                             'ClouderaManager', 'Cloudgene']
    # A list of currently active CloudMan services being proxied
    self.active_proxied = []
    self.was_started = False

def __init__(self, app):
    super(NginxService, self).__init__(app)
    self.svc_roles = [ServiceRole.NGINX]
    self.name = ServiceRole.to_string(ServiceRole.NGINX)
    self.dependencies = []
    self.exe = self.app.path_resolver.nginx_executable
    self.conf_dir = self.app.path_resolver.nginx_conf_dir
    self.conf_file = self.app.path_resolver.nginx_conf_file  # Main conf file
    self.ssl_is_on = False
    # The list of services that the Nginx service proxies
    self.proxied_services = ['Galaxy', 'GalaxyReports', 'Pulsar',
                             'ClouderaManager', 'Cloudgene']
    # A list of currently active CloudMan services being proxied
    self.active_proxied = []

def __init__(self, app, srv_type="master", host=""):
    """
    The srv_type defines whether we are running a master node or a worker
    node. If running a worker, the host IP should be passed in the host
    argument.
    """
    super(HTCondorService, self).__init__(app)
    log.debug("Condor is preparing")
    self.svc_roles = [ServiceRole.HTCONDOR]
    self.name = ServiceRole.to_string(ServiceRole.HTCONDOR)
    self.srv_type = srv_type
    if self.srv_type == "master":
        self.flock_to = ""
    else:
        self.host = host

def __init__(self, app):
    super(MigrationService, self).__init__(app)
    self.svc_roles = [ServiceRole.MIGRATION]
    self.name = ServiceRole.to_string(ServiceRole.MIGRATION)
    self.svc_type = ServiceType.CM_SERVICE
    self.dependencies = []
    if 'filesystems' in self.app.config.user_data:
        for fs in self.app.config.user_data.get('filesystems') or []:
            # Wait for galaxy data, indices and tools to come up before
            # attempting migration
            if ServiceRole.GALAXY_DATA in ServiceRole.from_string_array(fs['roles']):
                self.dependencies.append(
                    ServiceDependency(self, ServiceRole.GALAXY_DATA))
            if ServiceRole.GALAXY_TOOLS in ServiceRole.from_string_array(fs['roles']):
                self.dependencies.append(
                    ServiceDependency(self, ServiceRole.GALAXY_TOOLS))
            if ServiceRole.GALAXY_INDICES in ServiceRole.from_string_array(fs['roles']):
                self.dependencies.append(
                    ServiceDependency(self, ServiceRole.GALAXY_INDICES))

def __init__(self, app):
    super(ClouderaManagerService, self).__init__(app)
    self.svc_roles = [ServiceRole.CLOUDERA_MANAGER]
    self.name = ServiceRole.to_string(ServiceRole.CLOUDERA_MANAGER)
    self.dependencies = []
    self.remaining_start_attempts = NUM_START_ATTEMPTS
    self.db_pwd = misc.random_string_generator()
    # Indicate if the web server has been configured and started
    self.started = False
    self.cm_port = 7180
    # Default cluster configuration
    # TODO - read local cloud host name!
    # self.cm_host = socket.gethostname()
    self.cm_host = self.app.cloud_interface.get_local_hostname()
    # The actual worker nodes (note: this is a list of Instance objects)
    # (because get_worker_instances currently depends on tags, which is only
    # supported by EC2, get the list of instances only for the case of EC2 cloud.
    # This initialization is applicable only when restarting a cluster.
    # self.host_list = get_worker_instances() if (
    #     self.app.cloud_type == 'ec2' or self.app.cloud_type == 'openstack') else []
    # self.host_list = ["w1", "w2"]
    # self.instances = self.app.manager.worker_instances
    # self.host_list = [l.get_local_hostname() for l in self.app.manager.worker_instances]
    # self.host_list = [l.get_private_ip for l in self.app.manager.worker_instances]
    self.host_list = None
    self.cluster_name = "Cluster 1"
    self.cdh_version = "CDH5"
    self.cdh_version_number = "5"
    self.cm_username = "******"
    self.cm_password = "******"
    self.mgmt_service_name = "ManagementService"
    self.host_username = "******"
    self.host_password = self.app.config.get('password')
    self.cm_repo_url = None
    self.hdfs_service_name = "HDFS"
    self.hadoop_data_dir_prefix = "/mnt/dfs"
    self.yarn_service_name = "YARN"
    self.parcel_version = "5.4.1"
    self.cmd_timeout = 180
    self.api = None
    self.manager = None
    self.cluster = None
    self.hdfs_service = None
    self.yarn_service = None
    self.service_types_and_names = {"HDFS": "HDFS", "YARN": "YARN"}

def __init__(self, app):
    super(MigrationService, self).__init__(app)
    self.svc_roles = [ServiceRole.MIGRATION]
    self.name = ServiceRole.to_string(ServiceRole.MIGRATION)
    self.dependencies = []
    if 'filesystems' in self.app.ud:
        for fs in self.app.ud.get('filesystems') or []:
            # Wait for galaxy data, indices and tools to come up before
            # attempting migration
            if ServiceRole.GALAXY_DATA in ServiceRole.from_string_array(fs['roles']):
                self.dependencies.append(
                    ServiceDependency(self, ServiceRole.GALAXY_DATA))
            if ServiceRole.GALAXY_TOOLS in ServiceRole.from_string_array(fs['roles']):
                self.dependencies.append(
                    ServiceDependency(self, ServiceRole.GALAXY_TOOLS))
            if ServiceRole.GALAXY_INDICES in ServiceRole.from_string_array(fs['roles']):
                self.dependencies.append(
                    ServiceDependency(self, ServiceRole.GALAXY_INDICES))

def __init__(self, app):
    super(SlurmctldService, self).__init__(app)
    self.svc_roles = [ServiceRole.SLURMCTLD, ServiceRole.JOB_MANAGER]
    self.name = ServiceRole.to_string(ServiceRole.SLURMCTLD)
    self.dependencies = [
        ServiceDependency(self, ServiceRole.MIGRATION),
        ServiceDependency(self, ServiceRole.TRANSIENT_NFS),
    ]
    self.slurm_info = SlurmInfo()
    self.num_restarts = 0
    self.max_restarts = 3
    # This must be the same on the workers
    self.slurm_lock_file = os.path.join(
        self.app.path_resolver.slurm_root_nfs, 'slurm.lockfile')
    # Following a cluster reboot, this file may have been left over so
    # clean it up before starting the service
    if os.path.exists(self.slurm_lock_file):
        os.remove(self.slurm_lock_file)

def __init__(self, app):
    super(SlurmctldService, self).__init__(app)
    self.svc_roles = [ServiceRole.SLURMCTLD, ServiceRole.JOB_MANAGER]
    self.name = ServiceRole.to_string(ServiceRole.SLURMCTLD)
    self.dependencies = [
        ServiceDependency(self, ServiceRole.MIGRATION),
        ServiceDependency(self, ServiceRole.TRANSIENT_NFS),
    ]
    self.slurm_info = SlurmInfo()
    self.num_restarts = 0
    self.max_restarts = 3
    # This must be the same on the workers
    self.slurm_lock_file = os.path.join(self.app.path_resolver.slurm_root_nfs,
                                        'slurm.lockfile')
    # Following a cluster reboot, this file may have been left over so
    # clean it up before starting the service
    if os.path.exists(self.slurm_lock_file):
        os.remove(self.slurm_lock_file)

def __init__(self, app):
    super(GalaxyService, self).__init__(app)
    self.name = ServiceRole.to_string(ServiceRole.GALAXY)
    self.svc_roles = [ServiceRole.GALAXY]
    self.remaining_start_attempts = NUM_START_ATTEMPTS
    self.configured = False  # Indicates if the environment for running Galaxy has been configured
    self.ssl_is_on = False
    # Environment variables to set before executing galaxy's run.sh
    self.env_vars = {}
    self.dependencies = [
        ServiceDependency(self, ServiceRole.JOB_MANAGER),
        ServiceDependency(self, ServiceRole.GALAXY_POSTGRES),
        ServiceDependency(self, ServiceRole.GALAXY_DATA),
        ServiceDependency(self, ServiceRole.GALAXY_INDICES),
        ServiceDependency(self, ServiceRole.GALAXY_TOOLS),
        ServiceDependency(self, ServiceRole.PROFTPD)
    ]
    self.option_manager = galaxy_option_manager(app)

def __init__(self, app):
    super(GalaxyService, self).__init__(app)
    self.name = ServiceRole.to_string(ServiceRole.GALAXY)
    self.svc_roles = [ServiceRole.GALAXY]
    self.remaining_start_attempts = NUM_START_ATTEMPTS
    # Indicates if the environment for running Galaxy has been configured
    self.configured = False
    self.ssl_is_on = False
    # Environment variables to set before executing galaxy's run.sh
    self.env_vars = {}
    self.dependencies = [
        ServiceDependency(self, ServiceRole.JOB_MANAGER),
        ServiceDependency(self, ServiceRole.GALAXY_POSTGRES),
        ServiceDependency(self, ServiceRole.GALAXY_DATA),
        ServiceDependency(self, ServiceRole.GALAXY_INDICES),
        # ServiceDependency(self, ServiceRole.PROFTPD),
        ServiceDependency(self, ServiceRole.GALAXY_TOOLS)
    ]
    self.option_manager = galaxy_option_manager(app)

def __init__(self, app):
    super(GalaxyService, self).__init__(app)
    self.name = ServiceRole.to_string(ServiceRole.GALAXY)
    self.svc_roles = [ServiceRole.GALAXY]
    self.remaining_start_attempts = NUM_START_ATTEMPTS
    self.configured = False  # Indicates if the environment for running Galaxy has been configured
    # Environment variables to set before executing galaxy's run.sh
    self.env_vars = {
        "SGE_ROOT": self.app.path_resolver.sge_root,
        "DRMAA_LIBRARY_PATH": self.app.path_resolver.drmaa_library_path
    }
    self.dependencies = [
        ServiceDependency(self, ServiceRole.SGE),
        ServiceDependency(self, ServiceRole.GALAXY_POSTGRES),
        ServiceDependency(self, ServiceRole.GALAXY_DATA),
        ServiceDependency(self, ServiceRole.GALAXY_INDICES),
        ServiceDependency(self, ServiceRole.GALAXY_TOOLS),
        ServiceDependency(self, ServiceRole.PROFTPD)
    ]
    self.option_manager = galaxy_option_manager(app)

def __init__(self, app, as_min=-1, as_max=-1, instance_type=None,
             num_queued_jobs=2, mean_runtime_threshold=60,
             num_instances_to_add=1):
    """
    :type as_min: int
    :param as_min: The minimum number of worker nodes to maintain.

    :type as_max: int
    :param as_max: The maximum number of worker nodes to maintain.

    :type instance_type: str
    :param instance_type: The type of instance to use.

    :type num_queued_jobs: int
    :param num_queued_jobs: Minimum number of jobs that need to be queued
                            before autoscaling will trigger.

    :type mean_runtime_threshold: int
    :param mean_runtime_threshold: Mean runtime of running jobs before
                                   autoscaling will trigger.

    :type num_instances_to_add: int
    :param num_instances_to_add: Number of instances to add when scaling up.
    """
    super(AutoscaleService, self).__init__(app)
    self.state = service_states.UNSTARTED
    self.svc_roles = [ServiceRole.AUTOSCALE]
    self.svc_type = ServiceType.CM_SERVICE
    self.name = ServiceRole.to_string(ServiceRole.AUTOSCALE)
    self.dependencies = [ServiceDependency(self, ServiceRole.MIGRATION)]
    self.as_max = as_max
    self.as_min = as_min
    self.instance_type = instance_type

def create(self, filesystem=None):
    """
    Create a new volume. This can be done either from a snapshot (i.e.,
    ``self.from_snapshot_id``) or a blank one if the snapshot ID is not set.
    Note that ``self.size`` needs to be set if creating a blank volume
    before calling the method. If creating a volume from a snapshot and
    ``self.size`` is not set, the new volume will be of the same size as
    the snapshot.
    """
    if not self.size and not self.from_snapshot_id and not self.from_archive:
        log.error('Cannot add a {0} volume without a size, snapshot ID or '
                  'archive url; aborting.'.format(self.fs))
        return False
    # If creating the volume from a snapshot, get the expected volume size
    if self.from_snapshot_id and not self.volume:
        self.snapshot = self.app.cloud_interface.get_snapshot(self.from_snapshot_id)
        if not self.snapshot:
            log.error("Did not retrieve Snapshot object for {0}; aborting."
                      .format(self.from_snapshot_id))
            return False
        # We need a size to be able to create a volume, so if none
        # is specified, use snapshot size
        if not self.size:
            si = self.app.cloud_interface.get_snapshot_info(self.from_snapshot_id)
            self.size = si.get('volume_size')
    # If it does not already exist, create the volume
    if self.status == volume_status.NONE:
        log.debug("Creating a new volume of size '%s' in zone '%s' from "
                  "snapshot '%s' for %s."
                  % (self.size, self.app.cloud_interface.get_zone(),
                     self.from_snapshot_id, self.fs))
        self.volume = self.app.cloud_interface.create_volume(
            self.size,
            self.app.cloud_interface.get_zone(),
            snapshot=self.from_snapshot_id)
        if self.volume:
            # When creating from a snapshot in Euca, volume.size may be None
            self.size = int(self.volume.size or 0)
            log.debug("Created a new volume of size '%s' from snapshot '%s' "
                      "with ID '%s' in zone '%s' for %s."
                      % (self.size, self.from_snapshot_id, self.volume_id,
                         self.app.cloud_interface.get_zone(), self.fs))
        else:
            log.warning("No volume object - did not create a volume?")
            return False
    else:
        log.debug("Tried to create a volume for %s but it is in state '%s' "
                  "(volume ID: %s)" % (self.fs, self.status, self.volume_id))
        return False
    # Add tags to newly created volumes (do this outside the initial if/else
    # to ensure the tags get assigned even if using an existing volume vs.
    # creating a new one)
    self.app.cloud_interface.add_tag(
        self.volume, 'clusterName', self.app.config['cluster_name'])
    self.app.cloud_interface.add_tag(
        self.volume, 'bucketName', self.app.config['bucket_cluster'])
    if filesystem:
        self.app.cloud_interface.add_tag(self.volume, 'filesystem', filesystem)
        self.app.cloud_interface.add_tag(self.volume, 'Name',
                                         "{0}FS".format(filesystem))
        self.app.cloud_interface.add_tag(self.volume, 'roles',
                                         ServiceRole.to_string(self.fs.svc_roles))
    return True

def send_add_s3fs(self, bucket_name, svc_roles):
    msg = 'ADDS3FS | {0} | {1}'.format(bucket_name,
                                       ServiceRole.to_string(svc_roles))
    self._send_msg(msg)

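# The worker message assembled in send_add_s3fs above is a plain
# pipe-delimited string. A quick, self-contained illustration of the
# resulting format; 'my-bucket' and 'galaxyData' are hypothetical example
# values standing in for a real bucket name and the ServiceRole.to_string()
# output.
bucket_name = 'my-bucket'
roles_as_string = 'galaxyData'
msg = 'ADDS3FS | {0} | {1}'.format(bucket_name, roles_as_string)
print(msg)  # -> ADDS3FS | my-bucket | galaxyData
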
        log.debug("Tried to create a volume but it is in state '%s' "
                  "(volume ID: %s)" % (self.status, self.volume_id))
    # Add tags to newly created volumes (do this outside the initial if/else
    # to ensure the tags get assigned even if using an existing volume vs.
    # creating a new one)
    try:
        self.app.cloud_interface.add_tag(
            self.volume, 'clusterName', self.app.config['cluster_name'])
        self.app.cloud_interface.add_tag(
            self.volume, 'bucketName', self.app.config['bucket_cluster'])
        if filesystem:
            self.app.cloud_interface.add_tag(self.volume, 'filesystem', filesystem)
            self.app.cloud_interface.add_tag(self.volume, 'Name',
                                             "{0}FS".format(filesystem))
            self.app.cloud_interface.add_tag(self.volume, 'roles',
                                             ServiceRole.to_string(self.fs.svc_roles))
    except EC2ResponseError as e:
        log.error("Error adding tags to volume: %s" % e)

def delete(self):
    """
    Delete this volume.
    """
    try:
        volume_id = self.volume_id
        self.volume.delete()
        log.debug("Deleted volume '%s'" % volume_id)
        self.volume = None
    except EC2ResponseError as e:
        log.error("Error deleting volume '%s' - you should delete it manually "
                  "after the cluster has shut down: %s"
                  % (self.volume_id, e))

def __init__(self, app):
    super(SGEService, self).__init__(app)
    self.svc_roles = [ServiceRole.SGE, ServiceRole.JOB_MANAGER]
    self.name = ServiceRole.to_string(ServiceRole.SGE)
    self.dependencies = [ServiceDependency(self, ServiceRole.MIGRATION)]
    self.sge_info = SGEInfo()

def __init__(self, app):
    super(SGEService, self).__init__(app)
    self.svc_roles = [ServiceRole.SGE]
    self.name = ServiceRole.to_string(ServiceRole.SGE)
    self.dependencies = [ServiceDependency(self, ServiceRole.MIGRATION)]
    self.hosts = []

def create(self, filesystem=None):
    """
    Create a new volume. This can be done either from a snapshot (i.e.,
    ``self.from_snapshot_id``) or a blank one if the snapshot ID is not set.
    Note that ``self.size`` needs to be set if creating a blank volume
    before calling the method. If creating a volume from a snapshot and
    ``self.size`` is not set, the new volume will be of the same size as
    the snapshot.
    """
    if not self.size and not self.from_snapshot_id and not self.from_archive:
        log.error('Cannot add a {0} volume without a size, snapshot ID or '
                  'archive url; aborting.'.format(self.fs))
        return False
    # If creating the volume from a snapshot, get the expected volume size
    if self.from_snapshot_id and not self.volume:
        self.snapshot = self.app.cloud_interface.get_snapshot(
            self.from_snapshot_id)
        if not self.snapshot:
            log.error("Did not retrieve Snapshot object for {0}; aborting."
                      .format(self.from_snapshot_id))
            return False
        # We need a size to be able to create a volume, so if none
        # is specified, use snapshot size
        if not self.size:
            si = self.app.cloud_interface.get_snapshot_info(
                self.from_snapshot_id)
            self.size = si.get('volume_size')
    # If it does not already exist, create the volume
    if self.status == volume_status.NONE:
        log.debug("Creating a new volume of size '%s' in zone '%s' from "
                  "snapshot '%s' for %s."
                  % (self.size, self.app.cloud_interface.get_zone(),
                     self.from_snapshot_id, self.fs))
        self.volume = self.app.cloud_interface.create_volume(
            self.size,
            self.app.cloud_interface.get_zone(),
            snapshot=self.from_snapshot_id)
        if self.volume:
            # When creating from a snapshot in Euca, volume.size may be None
            self.size = int(self.volume.size or 0)
            log.debug("Created a new volume of size '%s' from snapshot '%s' "
                      "with ID '%s' in zone '%s' for %s."
                      % (self.size, self.from_snapshot_id, self.volume_id,
                         self.app.cloud_interface.get_zone(), self.fs))
        else:
            log.warning("No volume object - did not create a volume?")
            return False
    else:
        log.debug("Tried to create a volume for %s but it is in state '%s' "
                  "(volume ID: %s)" % (self.fs, self.status, self.volume_id))
        return False
    # Add tags to newly created volumes (do this outside the initial if/else
    # to ensure the tags get assigned even if using an existing volume vs.
    # creating a new one)
    self.app.cloud_interface.add_tag(self.volume, 'Name',
                                     self.app.config['cluster_name'])
    self.app.cloud_interface.add_tag(self.volume, 'bucketName',
                                     self.app.config['bucket_cluster'])
    if self.fs:
        self.app.cloud_interface.add_tag(self.volume, 'filesystem',
                                         self.fs.get_full_name())
        self.app.cloud_interface.add_tag(
            self.volume, 'roles', ServiceRole.to_string(self.fs.svc_roles))
    return True

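# The guard at the top of create() requires at least one of a size, a
# snapshot ID, or an archive URL before anything is provisioned. Below is a
# condensed, standalone sketch of that check; the function name and the
# example values are placeholders for illustration, not part of the original
# Volume class.
def can_create_volume(size=None, from_snapshot_id=None, from_archive=None):
    # Mirrors the "no size, no snapshot, no archive -> abort" condition
    return bool(size or from_snapshot_id or from_archive)


print(can_create_volume())                                 # False: nothing to create from
print(can_create_volume(size=10))                          # True: blank volume of a given size
print(can_create_volume(from_snapshot_id='snap-example'))  # True: size can come from the snapshot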