Example No. 1
 def _update_user_data(self):
     if 'filesystems' in self.app.ud:
         old_fs_list = self.app.ud.get('filesystems') or []
         new_fs_list = []
         # Rebuild the file system list in the new format
         for fs in old_fs_list:
             svc_roles = ServiceRole.from_string_array(fs['roles'])
             if ServiceRole.GALAXY_TOOLS in svc_roles and ServiceRole.GALAXY_DATA in svc_roles:
                 # This combination should only occur in new-style configs, but
                 # the check is added so things work properly even if run
                 # against an already-updated config.
                 # Only works for default configs though...
                 new_fs_list.append(fs)
             elif ServiceRole.GALAXY_TOOLS in svc_roles:
                 pass  # skip adding the galaxy tools file system, no longer needed.
             else:
                 if ServiceRole.GALAXY_DATA in ServiceRole.from_string_array(fs['roles']):
                     fs['roles'] = ServiceRole.to_string_array([ServiceRole.GALAXY_TOOLS,
                         ServiceRole.GALAXY_DATA])
                     new_fs_list.append(fs)
                 else:
                     new_fs_list.append(fs)
         self.app.ud['filesystems'] = new_fs_list
     self.app.ud['deployment_version'] = 2
     self.app.ud.pop('galaxy_home', None)  # TODO: Galaxy home is always reset
                                           # to default. Discuss implications
     return True
Example No. 2
 def _update_user_data(self):
     if 'filesystems' in self.app.config.user_data:
         old_fs_list = self.app.config.user_data.get('filesystems') or []
         new_fs_list = []
         # Rebuild the file system list in the new format
         for fs in old_fs_list:
             svc_roles = ServiceRole.from_string_array(fs['roles'])
             if ServiceRole.GALAXY_TOOLS in svc_roles and ServiceRole.GALAXY_DATA in svc_roles:
                 # This combination should only occur in new-style configs, but
                 # the check is added so things work properly even if run
                 # against an already-updated config.
                 # Only works for default configs though...
                 new_fs_list.append(fs)
             elif ServiceRole.GALAXY_TOOLS in svc_roles:
                 pass  # skip adding the galaxy tools file system, no longer needed.
             else:
                 if ServiceRole.GALAXY_DATA in ServiceRole.from_string_array(fs['roles']):
                     fs['roles'] = ServiceRole.to_string_array([ServiceRole.GALAXY_TOOLS,
                                                                ServiceRole.GALAXY_DATA])
                     new_fs_list.append(fs)
                 else:
                     new_fs_list.append(fs)
         self.app.config.user_data['filesystems'] = new_fs_list
     self.app.config.user_data['deployment_version'] = 2
     self.app.config.user_data.pop('galaxy_home', None)  # TODO: Galaxy home is always reset to default. Discuss implications
     return True
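To make the migration in Examples 1 and 2 concrete, here is a minimal, runnable sketch of the same transformation with stand-in stubs. The ServiceRole stub and the role strings 'galaxyData' and 'galaxyTools' are assumptions for illustration only; CloudMan's real ServiceRole class has a richer API.

class ServiceRole(object):
    # Stand-in role constants; the real string values are assumptions.
    GALAXY_DATA = 'galaxyData'
    GALAXY_TOOLS = 'galaxyTools'

    @staticmethod
    def from_string_array(roles):
        return list(roles)

    @staticmethod
    def to_string_array(roles):
        return list(roles)


def migrate_filesystems(old_fs_list):
    """Drop the standalone galaxy-tools FS; fold its role into galaxy-data."""
    new_fs_list = []
    for fs in old_fs_list:
        svc_roles = ServiceRole.from_string_array(fs['roles'])
        if (ServiceRole.GALAXY_TOOLS in svc_roles and
                ServiceRole.GALAXY_DATA in svc_roles):
            new_fs_list.append(fs)  # already in the new combined format
        elif ServiceRole.GALAXY_TOOLS in svc_roles:
            continue  # standalone tools FS is no longer needed
        else:
            if ServiceRole.GALAXY_DATA in svc_roles:
                fs['roles'] = ServiceRole.to_string_array(
                    [ServiceRole.GALAXY_TOOLS, ServiceRole.GALAXY_DATA])
            new_fs_list.append(fs)
    return new_fs_list


sample = [{'name': 'galaxy', 'roles': ['galaxyData']},
          {'name': 'galaxyTools', 'roles': ['galaxyTools']}]
print(migrate_filesystems(sample))
# -> [{'name': 'galaxy', 'roles': ['galaxyTools', 'galaxyData']}]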
Example No. 3
 def __init__(self, app, instance_role='master'):
     super(PSSService, self).__init__(app)
     self.svc_roles = [ServiceRole.PSS]
     self.name = ServiceRole.to_string(ServiceRole.PSS)
     self.svc_type = ServiceType.CM_SERVICE
     self.instance_role = instance_role
     log.debug("Configured PSS as {0}".format(self.instance_role))
Example No. 4
 def __init__(self, app, instance_role='master'):
     super(PSSService, self).__init__(app)
     self.svc_roles = [ServiceRole.PSS]
     self.name = ServiceRole.to_string(ServiceRole.PSS)
     self.svc_type = ServiceType.CM_SERVICE
     self.instance_role = instance_role
     log.debug("Configured PSS as {0}".format(self.instance_role))
Example No. 5
    def __init__(self, app):
        super(ClouderaManagerService, self).__init__(app)
        self.svc_roles = [ServiceRole.CLOUDERA_MANAGER]
        self.name = ServiceRole.to_string(ServiceRole.CLOUDERA_MANAGER)
        self.dependencies = []
        self.remaining_start_attempts = NUM_START_ATTEMPTS
        self.db_pwd = misc.random_string_generator()
        # Indicate if the web server has been configured and started
        self.started = False
        self.cm_port = 7180

        # Default cluster configuration
        # TODO - read local cloud host name!
        # self.cm_host = socket.gethostname()
        self.cm_host = self.app.cloud_interface.get_local_hostname()
        self.host_list = []
        self.cluster_name = "Cluster 1"
        self.cdh_version = "CDH5"
        self.cdh_version_number = "5"
        self.cm_username = "******"
        self.cm_password = "******"
        self.mgmt_service_name = "ManagementService"
        self.host_username = "******"
        self.host_password = self.app.config.get('password')
        self.cm_repo_url = None
        self.service_types_and_names = {
            "HDFS": "HDFS",
            "YARN": "YARN"
        }
Example No. 6
    def __init__(self, app):
        super(ClouderaManagerService, self).__init__(app)
        self.svc_roles = [ServiceRole.CLOUDERA_MANAGER]
        self.name = ServiceRole.to_string(ServiceRole.CLOUDERA_MANAGER)
        self.dependencies = []
        self.db_pwd = misc.random_string_generator()
        # Indicate if the web server has been configured and started
        self.started = False
        self.cm_port = 7180

        # Default cluster configuration
        self.cm_host = socket.gethostname()
        self.host_list = [self.cm_host]
        self.cluster_name = "Cluster 1"
        self.cdh_version = "CDH5"
        self.cdh_version_number = "5"
        self.cm_username = "******"
        self.cm_password = "******"
        self.cm_service_name = "ManagementService"
        self.host_username = "******"
        # Read the password from the system!
        self.host_password = self.app.config.get('password')
        self.cm_repo_url = None
        self.service_types_and_names = {
            "HDFS": "HDFS",
            "YARN": "YARN",
            "ZOOKEEPER": "ZooKeeper"
        }
Example No. 7
 def __init__(self, app):
     super(PostgresService, self).__init__(app)
     self.name = ServiceRole.to_string(ServiceRole.GALAXY_POSTGRES)
     self.svc_roles = [ServiceRole.GALAXY_POSTGRES]
     self.psql_port = app.path_resolver.psql_db_port
     self.dependencies = [ServiceDependency(self, ServiceRole.GALAXY_DATA),
                          ServiceDependency(self, ServiceRole.MIGRATION)]
Example No. 8
 def __init__(self, app):
     super(HadoopService, self).__init__(app)
     self.svc_roles = [ServiceRole.HADOOP]
     self.name = ServiceRole.to_string(ServiceRole.HADOOP)
     self.dependencies = [ServiceDependency(self, ServiceRole.SGE)]
     self.id_rsa_path = os.path.join(paths.P_HADOOP_HOME, "id_rsa")
     self.id_rsa_pub_key_path = os.path.join(paths.P_HADOOP_HOME,
                                             "id_rsa.pub")
Example No. 9
 def __init__(self, app):
     super(HadoopService, self).__init__(app)
     self.svc_roles = [ServiceRole.HADOOP]
     self.name = ServiceRole.to_string(ServiceRole.HADOOP)
     self.dependencies = [ServiceDependency(self, ServiceRole.SGE)]
     self.id_rsa_path = os.path.join(paths.P_HADOOP_HOME, "id_rsa")
     self.id_rsa_pub_key_path = os.path.join(
         paths.P_HADOOP_HOME, "id_rsa.pub")
Example No. 10
    def __init__(self, app):
        super(MigrationService, self).__init__(app)

        self.svc_roles = [ServiceRole.MIGRATION]
        self.name = ServiceRole.to_string(ServiceRole.MIGRATION)

        self.dependencies = []

        if 'filesystems' in self.app.ud:
            for fs in self.app.ud.get('filesystems') or []:
                # Wait for galaxy data, indices and tools to come up before attempting migration
                if ServiceRole.GALAXY_DATA in ServiceRole.from_string_array(fs['roles']):
                    self.dependencies.append(ServiceDependency(self, ServiceRole.GALAXY_DATA))
                if ServiceRole.GALAXY_TOOLS in ServiceRole.from_string_array(fs['roles']):
                    self.dependencies.append(ServiceDependency(self, ServiceRole.GALAXY_TOOLS))
                if ServiceRole.GALAXY_INDICES in ServiceRole.from_string_array(fs['roles']):
                    self.dependencies.append(ServiceDependency(self, ServiceRole.GALAXY_INDICES))
Example No. 11
    def __init__(self, app):
        super(MigrationService, self).__init__(app)

        self.svc_roles = [ServiceRole.MIGRATION]
        self.name = ServiceRole.to_string(ServiceRole.MIGRATION)
        self.svc_type = ServiceType.CM_SERVICE
        self.dependencies = []

        if 'filesystems' in self.app.config.user_data:
            for fs in self.app.config.user_data.get('filesystems') or []:
                # Wait for galaxy data, indices and tools to come up before attempting migration
                if ServiceRole.GALAXY_DATA in ServiceRole.from_string_array(fs['roles']):
                    self.dependencies.append(ServiceDependency(self, ServiceRole.GALAXY_DATA))
                if ServiceRole.GALAXY_TOOLS in ServiceRole.from_string_array(fs['roles']):
                    self.dependencies.append(ServiceDependency(self, ServiceRole.GALAXY_TOOLS))
                if ServiceRole.GALAXY_INDICES in ServiceRole.from_string_array(fs['roles']):
                    self.dependencies.append(ServiceDependency(self, ServiceRole.GALAXY_INDICES))
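As a quick illustration of how the loop above turns a user-data 'filesystems' list into migration dependencies, here is a self-contained sketch; the role strings are placeholders for illustration, not necessarily CloudMan's exact values.

user_data = {
    'filesystems': [
        {'name': 'galaxy', 'roles': ['galaxyData', 'galaxyIndices']},
        {'name': 'galaxyTools', 'roles': ['galaxyTools']},
    ]
}

# Mirror the three `if` checks above: one dependency per matching role.
wanted = ('galaxyData', 'galaxyTools', 'galaxyIndices')
deps = [role
        for fs in user_data.get('filesystems') or []
        for role in wanted if role in fs['roles']]
print(deps)  # ['galaxyData', 'galaxyIndices', 'galaxyTools']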
Example No. 12
 def __init__(self, app):
     super(SlurmdService, self).__init__(app)
     self.svc_roles = [ServiceRole.SLURMD]
     self.name = ServiceRole.to_string(ServiceRole.SLURMD)
     self.dependencies = [
         ServiceDependency(self, ServiceRole.SLURMCTLD),
     ]
     self.num_restarts = 0
     self.max_restarts = 3
Example No. 13
 def __init__(self, app):
     super(PulsarService, self).__init__(app)
     self.pulsar_home = None
     self.pulsar_port = DEFAULT_PULSAR_PORT
     self.name = ServiceRole.to_string(ServiceRole.PULSAR)
     self.svc_roles = [ServiceRole.PULSAR]
     self.dependencies = [
         ServiceDependency(self, ServiceRole.JOB_MANAGER)
     ]
Example No. 14
 def __init__(self, app):
     super(CloudgeneService, self).__init__(app)
     self.svc_roles = [ServiceRole.CLOUDGENE]
     self.name = ServiceRole.to_string(ServiceRole.CLOUDGENE)
     self.dependencies = [ServiceDependency(self, ServiceRole.CLOUDERA_MANAGER)]
     self.port = 8085
     self.cg_url = "http://cloudgene.uibk.ac.at/downloads/cloudgene-cloudman.tar.gz"
     self.cg_base_dir = '/mnt/galaxy/cloudgene/'
     self.cg_home = os.path.join(self.cg_base_dir, 'cloudgene-cloudman')
Example No. 15
 def __init__(self, app):
     super(SlurmdService, self).__init__(app)
     self.svc_roles = [ServiceRole.SLURMD]
     self.name = ServiceRole.to_string(ServiceRole.SLURMD)
     self.dependencies = [
         ServiceDependency(self, ServiceRole.SLURMCTLD),
     ]
     self.num_restarts = 0
     self.max_restarts = 3
Example No. 16
 def __init__(self, app):
     super(CloudgeneService, self).__init__(app)
     self.svc_roles = [ServiceRole.CLOUDGENE]
     self.name = ServiceRole.to_string(ServiceRole.CLOUDGENE)
     self.dependencies = [ServiceDependency(self, ServiceRole.CLOUDERA_MANAGER)]
     self.port = 8085
     self.cg_url = "https://cloudman.s3.amazonaws.com/files/cloudgene-cloudman-daemon.tar.gz"
     self.cg_base_dir = '/mnt/galaxy/cloudgene/'
     self.cg_home = os.path.join(self.cg_base_dir, 'cloudgene-daemon')
Example No. 17
 def __init__(self, app):
     super(CloudgeneService, self).__init__(app)
     self.svc_roles = [ServiceRole.CLOUDGENE]
     self.name = ServiceRole.to_string(ServiceRole.CLOUDGENE)
     self.dependencies = [ServiceDependency(self, ServiceRole.CLOUDERA_MANAGER)]
     self.port = 8085
     self.cg_url = "https://cloudman.s3.amazonaws.com/files/cloudgene-cloudman-daemon.tar.gz"
     self.cg_base_dir = '/mnt/galaxy/cloudgene/'
     self.cg_home = os.path.join(self.cg_base_dir, 'cloudgene-daemon')
Example No. 18
 def __init__(self, app):
     super(NodejsProxyService, self).__init__(app)
     self.svc_roles = [ServiceRole.NODEJSPROXY]
     self.name = ServiceRole.to_string(ServiceRole.NODEJSPROXY)
     self.dependencies = [ServiceDependency(self, ServiceRole.GALAXY_TOOLS),
                          ServiceDependency(self, ServiceRole.SUPERVISOR)]
     self.np_port = 8800
     self.supervisor_conf_dir = '/etc/supervisor/conf.d'
     self.supervisor_prog_name = 'galaxy_nodejs_proxy'
Example No. 19
 def __init__(self, app):
     """
     Initialize the service class by setting its role and indicating dependencies.
     """
     super(ProFTPdService, self).__init__(app)
     log.debug("Initializing ProFTPdService")
     self.svc_roles = [ServiceRole.PROFTPD]
     self.name = ServiceRole.to_string(ServiceRole.PROFTPD)
     self.dependencies = [ServiceDependency(self, ServiceRole.GALAXY_POSTGRES),
                          ServiceDependency(self, ServiceRole.GALAXY_DATA)]
Example No. 20
 def __init__(self, app, as_min=-1, as_max=-1, instance_type=None):
     super(AutoscaleService, self).__init__(app)
     self.state = service_states.UNSTARTED
     self.svc_roles = [ServiceRole.AUTOSCALE]
     self.svc_type = ServiceType.CM_SERVICE
     self.name = ServiceRole.to_string(ServiceRole.AUTOSCALE)
     self.dependencies = [ServiceDependency(self, ServiceRole.MIGRATION)]
     self.as_max = as_max  # Max number of nodes autoscale should maintain
     self.as_min = as_min  # Min number of nodes autoscale should maintain
     self.instance_type = instance_type  # Type of instances to start
Example No. 21
 def __init__(self, app, as_min=-1, as_max=-1, instance_type=None):
     super(AutoscaleService, self).__init__(app)
     self.state = service_states.UNSTARTED
     self.svc_roles = [ServiceRole.AUTOSCALE]
     self.svc_type = ServiceType.CM_SERVICE
     self.name = ServiceRole.to_string(ServiceRole.AUTOSCALE)
     self.dependencies = [ServiceDependency(self, ServiceRole.MIGRATION)]
     self.as_max = as_max  # Max number of nodes autoscale should maintain
     self.as_min = as_min  # Min number of nodes autoscale should maintain
     self.instance_type = instance_type  # Type of instances to start
Example No. 22
 def __init__(self, app):
     super(LwrService, self).__init__(app)
     self.lwr_home = self.app.path_resolver.lwr_home
     self.lwr_port = DEFAULT_LWR_PORT
     self.name = ServiceRole.to_string(ServiceRole.LWR)
     self.svc_roles = [ServiceRole.LWR]
     self.dependencies = [
         ServiceDependency(self, ServiceRole.SGE),  # Well someday anyway :)
         ServiceDependency(self, ServiceRole.GALAXY_TOOLS)  # Any way to make this depend on where LWR is installed?
     ]
Example No. 23
 def __init__(self, app):
     super(GalaxyReportsService, self).__init__(app)
     self.galaxy_home = self.app.path_resolver.galaxy_home
     self.reports_port = DEFAULT_REPORTS_PORT
     self.name = ServiceRole.to_string(ServiceRole.GALAXY_REPORTS)
     self.svc_roles = [ServiceRole.GALAXY_REPORTS]
     # Hopefully the Galaxy dependency alone is enough to ensure the
     # database is migrated, etc.
     self.dependencies = [ServiceDependency(self, ServiceRole.GALAXY)]
     self.conf_dir = os.path.join(
         self.app.path_resolver.galaxy_home, 'reports.conf.d')
Example No. 24
 def __init__(self, app, instance_role='master'):
     super(PSSService, self).__init__(app)
     self.svc_roles = [ServiceRole.PSS]
     self.name = ServiceRole.to_string(ServiceRole.PSS)
     self.instance_role = instance_role
     log.debug("Configured PSS as {0}".format(self.instance_role))
     # Name of the default script to run
     self.pss_filename = 'post_start_script' if self.instance_role == 'master' \
         else 'worker_post_start_script'
     self.pss_url = self.app.ud.get('post_start_script_url', None) if self.instance_role == 'master' \
         else self.app.ud.get('worker_post_start_script_url', None)
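The last few lines of Example 24 select a script name and URL based on the instance role. Below is a small runnable sketch of that selection, with a stand-in user-data dict; the URL keys match the ones read above.

def pss_config(instance_role, ud):
    # Masters and workers read different script names and user-data keys.
    if instance_role == 'master':
        return 'post_start_script', ud.get('post_start_script_url')
    return 'worker_post_start_script', ud.get('worker_post_start_script_url')


ud = {'post_start_script_url': 'http://example.org/pss.sh'}
print(pss_config('master', ud))  # ('post_start_script', 'http://example.org/pss.sh')
print(pss_config('worker', ud))  # ('worker_post_start_script', None)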
Example No. 25
 def __init__(self, app):
     super(SupervisorService, self).__init__(app)
     self.svc_roles = [ServiceRole.SUPERVISOR]
     self.name = ServiceRole.to_string(ServiceRole.SUPERVISOR)
     self.dependencies = []
     self.sv_port = 9002
     self.pid_file = '/var/run/supervisord.pid'
     self.main_conf_file = '/etc/supervisord.conf'
     self.conf_dir = '/etc/supervisor/conf.d'
     self.server = None
     self.log_file = '/var/log/supervisor/supervisord.log'
Example No. 26
 def __init__(self, filesystem, from_archive=None):
     """
     Instance's transient storage exposed over NFS.
     """
     super(TransientStorage, self).__init__(filesystem.app)
     self.fs = filesystem
     self.app = self.fs.app
     self.device = None
     self.from_archive = from_archive
     self.svc_roles = [ServiceRole.TRANSIENT_NFS]
     self.name = ServiceRole.to_string(ServiceRole.TRANSIENT_NFS)
Example No. 27
 def __init__(self, app):
     super(SupervisorService, self).__init__(app)
     self.svc_roles = [ServiceRole.SUPERVISOR]
     self.name = ServiceRole.to_string(ServiceRole.SUPERVISOR)
     self.dependencies = []
     self.sv_port = 9002
     self.pid_file = '/var/run/supervisord.pid'
     self.main_conf_file = '/etc/supervisord.conf'
     self.conf_dir = '/etc/supervisor/conf.d'
     self.server = None
     self.log_file = '/var/log/supervisor/supervisord.log'
Example No. 28
 def __init__(self, filesystem, from_archive=None):
     """
     Instance's transient storage exposed over NFS.
     """
     super(TransientStorage, self).__init__(filesystem.app)
     self.fs = filesystem
     self.app = self.fs.app
     self.device = None
     self.from_archive = from_archive
     self.svc_roles = [ServiceRole.TRANSIENT_NFS]
     self.name = ServiceRole.to_string(ServiceRole.TRANSIENT_NFS)
Example No. 29
 def __init__(self, app):
     super(NodejsProxyService, self).__init__(app)
     self.svc_roles = [ServiceRole.NODEJSPROXY]
     self.name = ServiceRole.to_string(ServiceRole.NODEJSPROXY)
     self.dependencies = [
         ServiceDependency(self, ServiceRole.GALAXY_TOOLS),
         ServiceDependency(self, ServiceRole.SUPERVISOR)
     ]
     self.np_port = 8800
     self.supervisor_conf_dir = '/etc/supervisor/conf.d'
     self.supervisor_prog_name = 'galaxy_nodejs_proxy'
Example No. 30
 def __init__(self, app):
     super(PulsarService, self).__init__(app)
     self.name = ServiceRole.to_string(ServiceRole.PULSAR)
     self.svc_roles = [ServiceRole.PULSAR]
     self.dependencies = [
         ServiceDependency(self, ServiceRole.JOB_MANAGER),
         ServiceDependency(self, ServiceRole.SUPERVISOR)
     ]
     self.pulsar_home = '/mnt/pulsar'
     self.pulsar_port = 8913
     self.supervisor_conf_dir = '/etc/supervisor/conf.d'
     self.supervisor_prog_name = 'pulsar'
Example No. 31
 def __init__(self, app):
     super(PulsarService, self).__init__(app)
     self.name = ServiceRole.to_string(ServiceRole.PULSAR)
     self.svc_roles = [ServiceRole.PULSAR]
     self.dependencies = [
         ServiceDependency(self, ServiceRole.JOB_MANAGER),
         ServiceDependency(self, ServiceRole.SUPERVISOR)
     ]
     self.pulsar_home = '/mnt/pulsar'
     self.pulsar_port = 8913
     self.supervisor_conf_dir = '/etc/supervisor/conf.d'
     self.supervisor_prog_name = 'pulsar'
Example No. 32
 def __init__(self, app):
     """
      Initialize the service class by setting its role and indicating dependencies.
     """
     super(ProFTPdService, self).__init__(app)
     log.debug("Initializing ProFTPdService")
     self.svc_roles = [ServiceRole.PROFTPD]
     self.name = ServiceRole.to_string(ServiceRole.PROFTPD)
     self.dependencies = [
         ServiceDependency(self, ServiceRole.GALAXY_POSTGRES),
         ServiceDependency(self, ServiceRole.GALAXY_DATA)
     ]
Example No. 33
 def __init__(self, app):
     super(GalaxyReportsService, self).__init__(app)
     self.galaxy_home = self.app.path_resolver.galaxy_home
     self.reports_port = DEFAULT_REPORTS_PORT
     self.name = ServiceRole.to_string(ServiceRole.GALAXY_REPORTS)
     self.svc_roles = [ServiceRole.GALAXY_REPORTS]
      # Hopefully the Galaxy dependency alone is enough to ensure the
      # database is migrated, etc.
     self.dependencies = [
         ServiceDependency(self, ServiceRole.GALAXY),
         ServiceDependency(self, ServiceRole.GALAXY_POSTGRES)
     ]
     self.conf_dir = os.path.join(self.app.path_resolver.galaxy_home,
                                  'reports.conf.d')
Example No. 34
 def __init__(self, app):
     super(NginxService, self).__init__(app)
     self.svc_roles = [ServiceRole.NGINX]
     self.name = ServiceRole.to_string(ServiceRole.NGINX)
     self.dependencies = []
     self.exe = self.app.path_resolver.nginx_executable
     self.conf_dir = self.app.path_resolver.nginx_conf_dir
     self.conf_file = self.app.path_resolver.nginx_conf_file  # Main conf file
     self.ssl_is_on = False
     # The list of services that Nginx service proxies
     self.proxied_services = ['Galaxy', 'GalaxyReports', 'Pulsar',
                              'ClouderaManager', 'Cloudgene']
     # A list of currently active CloudMan services being proxied
     self.active_proxied = []
Example No. 35
 def __init__(self, app):
     super(NginxService, self).__init__(app)
     self.svc_roles = [ServiceRole.NGINX]
     self.name = ServiceRole.to_string(ServiceRole.NGINX)
     self.dependencies = []
     self.exe = self.app.path_resolver.nginx_executable
     self.conf_dir = self.app.path_resolver.nginx_conf_dir
     self.conf_file = self.app.path_resolver.nginx_conf_file  # Main conf file
     self.ssl_is_on = self.app.config.user_data.get('use_ssl', False)
     self.proxied_services = ['Galaxy', 'GalaxyReports', 'Pulsar',
                              'ClouderaManager', 'Cloudgene']
     # A list of currently active CloudMan services being proxied
     self.active_proxied = []
     self.was_started = False
Example No. 36
 def __init__(self, app, srv_type="master", host=""):
     """
      The srv_type defines whether we are running a master node or a
      worker node. If running a worker, the host IP should be passed in
      via the host argument.
     """
     super(HTCondorService, self).__init__(app)
     log.debug("Condor is preparing")
     self.svc_roles = [ServiceRole.HTCONDOR]
     self.name = ServiceRole.to_string(ServiceRole.HTCONDOR)
     self.srv_type = srv_type
     if self.srv_type == "master":
         self.flock_to = ""
     else:
         self.host = host
Example No. 37
 def __init__(self, app, srv_type="master", host=""):
     """
      The srv_type defines whether we are running a master node or a
      worker node. If running a worker, the host IP should be passed in
      via the host argument.
     """
     super(HTCondorService, self).__init__(app)
     log.debug("Condor is preparing")
     self.svc_roles = [ServiceRole.HTCONDOR]
     self.name = ServiceRole.to_string(ServiceRole.HTCONDOR)
     self.srv_type = srv_type
     if self.srv_type == "master":
         self.flock_to = ""
     else:
         self.host = host
Example No. 38
    def __init__(self, app):
        super(ClouderaManagerService, self).__init__(app)
        self.svc_roles = [ServiceRole.CLOUDERA_MANAGER]
        self.name = ServiceRole.to_string(ServiceRole.CLOUDERA_MANAGER)
        self.dependencies = []
        self.remaining_start_attempts = NUM_START_ATTEMPTS
        self.db_pwd = misc.random_string_generator()
        # Indicate if the web server has been configured and started
        self.started = False
        self.cm_port = 7180

        # Default cluster configuration
        # TODO - read local cloud host name!
        # self.cm_host = socket.gethostname()
        self.cm_host = self.app.cloud_interface.get_local_hostname()
        # The actual worker nodes (note: this is a list of Instance objects).
        # Because get_worker_instances currently depends on tags, which are
        # only supported by EC2, get the list of instances only in the case of
        # the EC2 cloud. This initialization is applicable only when restarting
        # a cluster.
        # self.host_list = get_worker_instances() if (
        #    self.app.cloud_type == 'ec2' or self.app.cloud_type == 'openstack') else []
        # self.host_list = ["w1", "w2"]
        # self.instances = self.app.manager.worker_instances
        # self.host_list = [l.get_local_hostname() for l in self.app.manager.worker_instances]
        # self.host_list = [l.get_private_ip for l in self.app.manager.worker_instances]
        self.host_list = None
        self.cluster_name = "Cluster 1"
        self.cdh_version = "CDH5"
        self.cdh_version_number = "5"
        self.cm_username = "******"
        self.cm_password = "******"
        self.mgmt_service_name = "ManagementService"
        self.host_username = "******"
        self.host_password = self.app.config.get('password')
        self.cm_repo_url = None
        self.hdfs_service_name = "HDFS"
        self.hadoop_data_dir_prefix = "/mnt/dfs"
        self.yarn_service_name = "YARN"
        self.parcel_version = "5.4.1"
        self.cmd_timeout = 180
        self.api = None
        self.manager = None
        self.cluster = None
        self.hdfs_service = None
        self.yarn_service = None
        self.service_types_and_names = {"HDFS": "HDFS", "YARN": "YARN"}
Example No. 39
 def __init__(self, app):
     super(SlurmctldService, self).__init__(app)
     self.svc_roles = [ServiceRole.SLURMCTLD, ServiceRole.JOB_MANAGER]
     self.name = ServiceRole.to_string(ServiceRole.SLURMCTLD)
     self.dependencies = [
         ServiceDependency(self, ServiceRole.MIGRATION),
         ServiceDependency(self, ServiceRole.TRANSIENT_NFS),
     ]
     self.slurm_info = SlurmInfo()
     self.num_restarts = 0
     self.max_restarts = 3
     # This must be the same on the workers
     self.slurm_lock_file = os.path.join(
         self.app.path_resolver.slurm_root_nfs, 'slurm.lockfile')
     # Following a cluster reboot, this file may have been left over so
     # clean it up before starting the service
     if os.path.exists(self.slurm_lock_file):
         os.remove(self.slurm_lock_file)
Example No. 40
 def __init__(self, app):
     super(SlurmctldService, self).__init__(app)
     self.svc_roles = [ServiceRole.SLURMCTLD, ServiceRole.JOB_MANAGER]
     self.name = ServiceRole.to_string(ServiceRole.SLURMCTLD)
     self.dependencies = [
         ServiceDependency(self, ServiceRole.MIGRATION),
         ServiceDependency(self, ServiceRole.TRANSIENT_NFS),
     ]
     self.slurm_info = SlurmInfo()
     self.num_restarts = 0
     self.max_restarts = 3
     # This must be the same on the workers
     self.slurm_lock_file = os.path.join(self.app.path_resolver.slurm_root_nfs,
                                         'slurm.lockfile')
     # Following a cluster reboot, this file may have been left over so
     # clean it up before starting the service
     if os.path.exists(self.slurm_lock_file):
         os.remove(self.slurm_lock_file)
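The constructor above removes a leftover lock file so a rebooted cluster can start cleanly. Here is a standalone sketch of that cleanup, using a temporary directory so the snippet is runnable anywhere.

import os
import tempfile

slurm_root = tempfile.mkdtemp()   # stand-in for slurm_root_nfs
lock_file = os.path.join(slurm_root, 'slurm.lockfile')
open(lock_file, 'w').close()      # simulate a lock left over from a reboot
if os.path.exists(lock_file):     # same guard as in __init__ above
    os.remove(lock_file)
print(os.path.exists(lock_file))  # False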
Example No. 41
 def __init__(self, app):
     super(GalaxyService, self).__init__(app)
     self.name = ServiceRole.to_string(ServiceRole.GALAXY)
     self.svc_roles = [ServiceRole.GALAXY]
     self.remaining_start_attempts = NUM_START_ATTEMPTS
     self.configured = False  # Indicates if the environment for running Galaxy has been configured
     self.ssl_is_on = False
     # Environment variables to set before executing galaxy's run.sh
     self.env_vars = {}
     self.dependencies = [
         ServiceDependency(self, ServiceRole.JOB_MANAGER),
         ServiceDependency(self, ServiceRole.GALAXY_POSTGRES),
         ServiceDependency(self, ServiceRole.GALAXY_DATA),
         ServiceDependency(self, ServiceRole.GALAXY_INDICES),
         ServiceDependency(self, ServiceRole.GALAXY_TOOLS),
         ServiceDependency(self, ServiceRole.PROFTPD)
     ]
     self.option_manager = galaxy_option_manager(app)
Example No. 42
 def __init__(self, app):
     super(GalaxyService, self).__init__(app)
     self.name = ServiceRole.to_string(ServiceRole.GALAXY)
     self.svc_roles = [ServiceRole.GALAXY]
     self.remaining_start_attempts = NUM_START_ATTEMPTS
     # Indicates if the environment for running Galaxy has been configured
     self.configured = False
     self.ssl_is_on = False
     # Environment variables to set before executing galaxy's run.sh
     self.env_vars = {}
     self.dependencies = [
         ServiceDependency(self, ServiceRole.JOB_MANAGER),
         ServiceDependency(self, ServiceRole.GALAXY_POSTGRES),
         ServiceDependency(self, ServiceRole.GALAXY_DATA),
         ServiceDependency(self, ServiceRole.GALAXY_INDICES),
         # ServiceDependency(self, ServiceRole.PROFTPD),
         ServiceDependency(self, ServiceRole.GALAXY_TOOLS)
     ]
     self.option_manager = galaxy_option_manager(app)
Example No. 43
 def __init__(self, app):
     super(GalaxyService, self).__init__(app)
     self.name = ServiceRole.to_string(ServiceRole.GALAXY)
     self.svc_roles = [ServiceRole.GALAXY]
     self.remaining_start_attempts = NUM_START_ATTEMPTS
     self.configured = False  # Indicates if the environment for running Galaxy has been configured
     # Environment variables to set before executing galaxy's run.sh
     self.env_vars = {
         "SGE_ROOT": self.app.path_resolver.sge_root,
         "DRMAA_LIBRARY_PATH": self.app.path_resolver.drmaa_library_path
     }
     self.dependencies = [
         ServiceDependency(self, ServiceRole.SGE),
         ServiceDependency(self, ServiceRole.GALAXY_POSTGRES),
         ServiceDependency(self, ServiceRole.GALAXY_DATA),
         ServiceDependency(self, ServiceRole.GALAXY_INDICES),
         ServiceDependency(self, ServiceRole.GALAXY_TOOLS),
         ServiceDependency(self, ServiceRole.PROFTPD)
     ]
     self.option_manager = galaxy_option_manager(app)
Example No. 44
    def __init__(self,
                 app,
                 as_min=-1,
                 as_max=-1,
                 instance_type=None,
                 num_queued_jobs=2,
                 mean_runtime_threshold=60,
                 num_instances_to_add=1):
        """
        :type as_min: int
        :param as_min: The minimum number of worker nodes to maintain.

        :type as_max: int
        :param as_max: The maximum number of worker nodes to maintain.

        :type instance_type: str
        :param instance_type: The type of instance to use.

        :type num_queued_jobs: int
        :param num_queued_jobs: Minimum number of jobs that need to be queued
                                before autoscaling will trigger.

        :type mean_runtime_threshold: int
        :param mean_runtime_threshold: Mean running job runtime before
                                       autoscaling will trigger.

        :type num_instances_to_add: int
        :param num_instances_to_add: Number of instances to add when scaling up.
        """
        super(AutoscaleService, self).__init__(app)
        self.state = service_states.UNSTARTED
        self.svc_roles = [ServiceRole.AUTOSCALE]
        self.svc_type = ServiceType.CM_SERVICE
        self.name = ServiceRole.to_string(ServiceRole.AUTOSCALE)
        self.dependencies = [ServiceDependency(self, ServiceRole.MIGRATION)]
        self.as_max = as_max
        self.as_min = as_min
        self.instance_type = instance_type
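The as_min/as_max arguments bound the worker pool size. As a rough illustration of how such bounds can gate a scaling decision, here is a small clamp helper; it is not CloudMan code, and treating -1 as "no bound" is an assumption based on the defaults above.

def clamp_cluster_size(desired, as_min, as_max):
    # -1 is taken to mean "no bound" (assumption based on the defaults).
    if as_min >= 0:
        desired = max(desired, as_min)
    if as_max >= 0:
        desired = min(desired, as_max)
    return desired


print(clamp_cluster_size(10, as_min=1, as_max=4))   # 4
print(clamp_cluster_size(0, as_min=1, as_max=4))    # 1
print(clamp_cluster_size(3, as_min=-1, as_max=-1))  # 3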
Example No. 45
 def send_add_s3fs(self, bucket_name, svc_roles):
     msg = 'ADDS3FS | {0} | {1}'.format(bucket_name, ServiceRole.to_string(svc_roles))
     self._send_msg(msg)
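For reference, the wire message produced by send_add_s3fs looks roughly like the following; the exact string ServiceRole.to_string returns for a roles list is an assumption here.

bucket_name = 'my-bucket'
roles_string = 'galaxyData'  # stand-in for ServiceRole.to_string(svc_roles)
msg = 'ADDS3FS | {0} | {1}'.format(bucket_name, roles_string)
print(msg)  # ADDS3FS | my-bucket | galaxyData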
Example No. 46
            log.debug("Tried to create a volume but it is in state '%s' (volume ID: %s)" %
                      (self.status, self.volume_id))

        # Add tags to newly created volumes (do this outside the initial if/else
        # to ensure the tags get assigned even if using an existing volume vs.
        # creating a new one)
        try:
            self.app.cloud_interface.add_tag(
                self.volume, 'clusterName', self.app.config['cluster_name'])
            self.app.cloud_interface.add_tag(
                self.volume, 'bucketName', self.app.config['bucket_cluster'])
            if filesystem:
                self.app.cloud_interface.add_tag(self.volume, 'filesystem', filesystem)
                self.app.cloud_interface.add_tag(self.volume, 'Name', "{0}FS".format(filesystem))
                self.app.cloud_interface.add_tag(self.volume, 'roles',
                                                 ServiceRole.to_string(self.fs.svc_roles))
        except EC2ResponseError as e:
            log.error("Error adding tags to volume: %s" % e)

    def delete(self):
        """
        Delete this volume.
        """
        try:
            volume_id = self.volume_id
            self.volume.delete()
            log.debug("Deleted volume '%s'" % volume_id)
            self.volume = None
        except EC2ResponseError as e:
            log.error("Error deleting volume '%s' - you should delete it manually "
                      "after the cluster has shut down: %s" % (self.volume_id, e))
Example No. 47
 def __init__(self, app):
     super(SGEService, self).__init__(app)
     self.svc_roles = [ServiceRole.SGE, ServiceRole.JOB_MANAGER]
     self.name = ServiceRole.to_string(ServiceRole.SGE)
     self.dependencies = [ServiceDependency(self, ServiceRole.MIGRATION)]
     self.sge_info = SGEInfo()
Example No. 48
 def __init__(self, app):
     super(SGEService, self).__init__(app)
     self.svc_roles = [ServiceRole.SGE, ServiceRole.JOB_MANAGER]
     self.name = ServiceRole.to_string(ServiceRole.SGE)
     self.dependencies = [ServiceDependency(self, ServiceRole.MIGRATION)]
     self.sge_info = SGEInfo()
Example No. 49
def normalize_user_data(app, ud):
    """
    Normalize user data format to a consistent representation used within CloudMan.

    This is useful because user data and persistent data evolve over time, so
    calling this method at app start enables any necessary translation to happen.
    """
    if ud.get('persistent_data_version', 1) < app.PERSISTENT_DATA_VERSION:
        # First make a backup of the deprecated persistent data file
        s3_conn = app.cloud_interface.get_s3_connection()
        copy_file_in_bucket(
            s3_conn, ud['bucket_cluster'], ud['bucket_cluster'],
            'persistent_data.yaml', 'persistent_data-deprecated.yaml', preserve_acl=False,
            validate=False)
        # Convert (i.e., normalize) v2 ud
        if 'filesystems' in ud:
            log.debug("Normalizing v2 user data")
            for fs in ud['filesystems']:
                if 'roles' not in fs:
                    fs['roles'] = ServiceRole.legacy_convert(fs['name'])
                if 'delete_on_termination' not in fs:
                    if fs['kind'] == 'snapshot':
                        fs['delete_on_termination'] = True
                    else:
                        fs['delete_on_termination'] = False
            for svc in ud.get('services', []):
                if 'roles' not in svc:
                    svc['roles'] = ServiceRole.legacy_convert(
                        svc.get('name', 'NoName'))
        # Convert (i.e., normalize) v1 ud
        if "static_filesystems" in ud or "data_filesystems" in ud:
            log.debug("Normalizing v1 user data")
            if 'filesystems' not in ud:
                ud['filesystems'] = []
            if 'static_filesystems' in ud:
                for vol in ud['static_filesystems']:
                    # Create a mapping between the old and the new format
                    # styles. Some assumptions are made here; namely, all
                    # static file systems in the original data are assumed to
                    # be delete_on_termination, their name defines their role,
                    # and they are mounted under /mnt/<name>.
                    roles = ServiceRole.legacy_convert(vol['filesystem'])
                    fs = {'kind': 'snapshot', 'name': vol['filesystem'],
                          'roles': roles, 'delete_on_termination': True,
                          'mount_point': os.path.join('/mnt', vol['filesystem']),
                          'ids': [vol['snap_id']]}
                    ud['filesystems'].append(fs)
                ud.pop('static_filesystems')
                ud['cluster_type'] = 'Galaxy'
            if 'data_filesystems' in ud:
                for fs_name, fs in ud['data_filesystems'].items():
                    fs = {'kind': 'volume', 'name': fs_name,
                          'roles': ServiceRole.legacy_convert(fs_name), 'delete_on_termination': False,
                          'mount_point': os.path.join('/mnt', fs_name),
                          'ids': [fs[0]['vol_id']]}
                    ud['filesystems'].append(fs)
                ud.pop('data_filesystems')
                if 'cluster_type' not in ud:
                    ud['cluster_type'] = 'Data'
            if 'galaxy_home' in ud:
                ud.pop('galaxy_home')
        if ud.get('services') and 'service' in ud['services'][0]:
            log.debug("Normalizing v1 service user data")
            old_svc_list = ud['services']
            ud['services'] = []
            # clear 'services' and replace with the new format
            for svc in old_svc_list:
                if 'roles' not in svc:
                    normalized_svc = {'name': svc['service'], 'roles':
                                      ServiceRole.legacy_convert(svc['service'])}
                    ud['services'].append(normalized_svc)
    return ud
Example No. 50
def normalize_user_data(app, ud):
    """
    Normalize user data format to a consistent representation used within CloudMan.
    This is useful because user data and persistent data evolve over time, so
    calling this method at app start enables any necessary translation to happen.
    """
    if ud.get("persistent_data_version", 1) < app.PERSISTENT_DATA_VERSION:
        # First make a backup of the deprecated persistent data file
        s3_conn = app.cloud_interface.get_s3_connection()
        copy_file_in_bucket(
            s3_conn,
            ud["bucket_cluster"],
            ud["bucket_cluster"],
            "persistent_data.yaml",
            "persistent_data-deprecated.yaml",
            preserve_acl=False,
            validate=False,
        )
        # Convert (i.e., normalize) v2 ud
        if "filesystems" in ud:
            log.debug("Normalizing v2 user data")
            for fs in ud["filesystems"]:
                if "roles" not in fs:
                    fs["roles"] = ServiceRole.legacy_convert(fs["name"])
                if "delete_on_termination" not in fs:
                    if fs["kind"] == "snapshot":
                        fs["delete_on_termination"] = True
                    else:
                        fs["delete_on_termination"] = False
            for svc in ud.get("services", []):
                if "roles" not in svc:
                    svc["roles"] = ServiceRole.legacy_convert(svc.get("name", "NoName"))
        # Convert (i.e., normalize) v1 ud
        if "static_filesystems" in ud or "data_filesystems" in ud:
            log.debug("Normalizing v1 user data")
            if "filesystems" not in ud:
                ud["filesystems"] = []
            if "static_filesystems" in ud:
                for vol in ud["static_filesystems"]:
                    # Create a mapping between the old and the new format
                    # styles. Some assumptions are made here; namely, all
                    # static file systems in the original data are assumed to
                    # be delete_on_termination, their name defines their role,
                    # and they are mounted under /mnt/<name>.
                    roles = ServiceRole.legacy_convert(vol["filesystem"])
                    fs = {
                        "kind": "snapshot",
                        "name": vol["filesystem"],
                        "roles": roles,
                        "delete_on_termination": True,
                        "mount_point": os.path.join("/mnt", vol["filesystem"]),
                        "ids": [vol["snap_id"]],
                    }
                    ud["filesystems"].append(fs)
                ud.pop("static_filesystems")
                ud["cluster_type"] = "Galaxy"
            if "data_filesystems" in ud:
                for fs_name, fs in ud["data_filesystems"].items():
                    fs = {
                        "kind": "volume",
                        "name": fs_name,
                        "roles": ServiceRole.legacy_convert(fs_name),
                        "delete_on_termination": False,
                        "mount_point": os.path.join("/mnt", fs_name),
                        "ids": [fs[0]["vol_id"]],
                    }
                    ud["filesystems"].append(fs)
                ud.pop("data_filesystems")
                if "cluster_type" not in ud:
                    ud["cluster_type"] = "Data"
            if "galaxy_home" in ud:
                ud.pop("galaxy_home")
        if "services" in ud and "service" in ud["services"][0]:
            log.debug("Normalizing v1 service user data")
            old_svc_list = ud["services"]
            ud["services"] = []
            # clear 'services' and replace with the new format
            for svc in old_svc_list:
                if "roles" not in svc:
                    normalized_svc = {"name": svc["service"], "roles": ServiceRole.legacy_convert(svc["service"])}
                    ud["services"].append(normalized_svc)
    return ud
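An illustrative before/after for the v1-to-v2 normalization above. The snapshot ID is a made-up placeholder, and the 'roles' value assumes ServiceRole.legacy_convert maps this particular name to itself.

v1_ud = {
    'persistent_data_version': 1,
    'static_filesystems': [{'filesystem': 'galaxyIndices',
                            'snap_id': 'snap-00000000'}],
}

# After normalize_user_data(app, v1_ud) one would expect roughly:
expected_v2 = {
    'persistent_data_version': 1,
    'cluster_type': 'Galaxy',
    'filesystems': [{'kind': 'snapshot',
                     'name': 'galaxyIndices',
                     'roles': ['galaxyIndices'],
                     'delete_on_termination': True,
                     'mount_point': '/mnt/galaxyIndices',
                     'ids': ['snap-00000000']}],
}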
Example No. 51
 def __init__(self, app):
     super(SGEService, self).__init__(app)
     self.svc_roles = [ServiceRole.SGE]
     self.name = ServiceRole.to_string(ServiceRole.SGE)
     self.dependencies = [ServiceDependency(self, ServiceRole.MIGRATION)]
     self.hosts = []
Example No. 52
 def send_add_s3fs(self, bucket_name, svc_roles):
     msg = 'ADDS3FS | {0} | {1}'.format(bucket_name, ServiceRole.to_string(svc_roles))
     self._send_msg(msg)
Example No. 53
    def create(self, filesystem=None):
        """
        Create a new volume.

        This can be done either from a snapshot (i.e., ``self.from_snapshot_id``)
        or a blank one if the snapshot ID is not set. Note that ``self.size``
        needs to be set if creating a blank volume before calling the method.
        If creating a volume from a snapshot and ``self.size`` is not set, the
        new volume will be of the same size as the snapshot.
        """
        if not self.size and not self.from_snapshot_id and not self.from_archive:
            log.error('Cannot add a {0} volume without a size, snapshot ID or '
                      'archive url; aborting.'.format(self.fs))
            return False
        # If creating the volume from a snapshot, get the expected volume size
        if self.from_snapshot_id and not self.volume:
            self.snapshot = self.app.cloud_interface.get_snapshot(
                self.from_snapshot_id)
            if not self.snapshot:
                log.error(
                    "Did not retrieve Snapshot object for {0}; aborting.".
                    format(self.from_snapshot_id))
                return False
            # We need a size to be able to create a volume, so if none
            # is specified, use snapshot size
            if not self.size:
                si = self.app.cloud_interface.get_snapshot_info(
                    self.from_snapshot_id)
                self.size = si.get('volume_size')
        # If it does not already exist, create the volume
        if self.status == volume_status.NONE:
            log.debug("Creating a new volume of size '%s' in zone '%s' from "
                      "snapshot '%s' for %s." %
                      (self.size, self.app.cloud_interface.get_zone(),
                       self.from_snapshot_id, self.fs))
            self.volume = self.app.cloud_interface.create_volume(
                self.size,
                self.app.cloud_interface.get_zone(),
                snapshot=self.from_snapshot_id)
            if self.volume:
                # When creating from a snapshot in Euca, volume.size may be None
                self.size = int(self.volume.size or 0)
                log.debug(
                    "Created a new volume of size '%s' from snapshot '%s' "
                    "with ID '%s' in zone '%s' for %s." %
                    (self.size, self.from_snapshot_id, self.volume_id,
                     self.app.cloud_interface.get_zone(), self.fs))
            else:
                log.warning("No volume object - did not create a volume?")
                return False
        else:
            log.debug(
                "Tried to create a volume for %s but it is in state '%s' "
                "(volume ID: %s)" % (self.fs, self.status, self.volume_id))
            return False
        # Add tags to newly created volumes (do this outside the initial if/else
        # to ensure the tags get assigned even if using an existing volume vs.
        # creating a new one)
        self.app.cloud_interface.add_tag(self.volume, 'Name',
                                         self.app.config['cluster_name'])
        self.app.cloud_interface.add_tag(self.volume, 'bucketName',
                                         self.app.config['bucket_cluster'])
        if self.fs:
            self.app.cloud_interface.add_tag(self.volume, 'filesystem',
                                             self.fs.get_full_name())
            self.app.cloud_interface.add_tag(
                self.volume, 'roles', ServiceRole.to_string(self.fs.svc_roles))
        return True
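One detail worth pulling out of create() is the size-resolution rule: an explicit size wins, otherwise the snapshot's recorded volume size is used. Below is a self-contained sketch of that rule; the helper name is ours, not CloudMan's.

def resolve_volume_size(requested_size, snapshot_info):
    # Mirrors the fallback in create(): an explicit size wins, otherwise
    # use the snapshot's recorded volume size.
    if requested_size:
        return requested_size
    return snapshot_info.get('volume_size')


print(resolve_volume_size(0, {'volume_size': 50}))    # 50 (from snapshot)
print(resolve_volume_size(100, {'volume_size': 50}))  # 100 (explicit wins)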