コード例 #1
0
ファイル: clouderamanager.py プロジェクト: kaktus42/cloudman
 def status(self):
     """
     Check and update the status of the service.
     """
     # While the service is mid-transition or deliberately stopped, the
     # state machine elsewhere owns the state; do not interfere.
     transitional_states = (service_states.UNSTARTED,
                            service_states.STARTING,
                            service_states.SHUTTING_DOWN,
                            service_states.SHUT_DOWN,
                            service_states.WAITING_FOR_USER_ACTION)
     if self.state in transitional_states:
         return
     svc_status = misc.getoutput('service cloudera-scm-server status', quiet=True)
     # Messages /etc/init.d/cloudera-scm-server prints when the server is down
     not_running_msgs = ('is dead and pid file exists',
                         'is dead and lock file exists',
                         'is not running',
                         'status is unknown')
     for msg in not_running_msgs:
         if msg not in svc_status:
             continue
         log.warning("Cloudera server not running: {0}.".format(msg))
         if self.remaining_start_attempts > 0:
             # Allow the service to be restarted.
             log.debug("Resetting ClouderaManager service")
             self.state = service_states.UNSTARTED
         else:
             log.error("Exceeded number of restart attempts; "
                       "ClouderaManager service in ERROR.")
             self.state = service_states.ERROR
     if self.started and 'is running' in svc_status:
         self.state = service_states.RUNNING
         # Once the service gets running, reset the number of start attempts
         self.remaining_start_attempts = NUM_START_ATTEMPTS
コード例 #2
0
 def add(self):
     """
     Add this file system by creating a dedicated path (i.e., self.fs.mount_point)
     and exporting it over NFS. Set the owner of the repo as ``ubuntu`` user.
     """
     mount_point = self.fs.mount_point
     log.debug("Adding a transient FS at {0}".format(mount_point))
     # transient_nfs gets created first - make sure it's on a dedicated device
     if self.fs.name == 'transient_nfs' and self.app.cloud_type == 'ec2':
         self._ensure_ephemeral_disk_mounted()
     misc.make_dir(mount_point, owner='ubuntu')
     # Set the device ID
     self.device = misc.getoutput(
         "df %s | grep -v Filesystem | awk '{print $1}'" % mount_point)
     if not self.from_archive:
         # No archive: just export the path over NFS and update state.
         self.fs.nfs_share_and_set_state()
         return
     # If based on an archive, extract archive contents to the mount point
     self.fs.persistent = True
     self.fs.state = service_states.CONFIGURING
     # Extract the FS archive in a separate thread
     log.debug("Extracting transient FS {0} from an archive in a "
               "dedicated thread.".format(self.get_full_name()))
     ExtractArchive(self.from_archive['url'], mount_point,
                    self.from_archive['md5_sum'],
                    callback=self.fs.nfs_share_and_set_state).start()
コード例 #3
0
 def add(self):
     """
     Add this file system by creating a dedicated path (i.e., self.fs.mount_point)
     and exporting it over NFS. Set the owner of the repo as ``ubuntu`` user.
     """
     log.debug("Adding a transient FS at {0}".format(self.fs.mount_point))
     misc.make_dir(self.fs.mount_point, owner='ubuntu')
     # Make the default instance transient storage group writable
     if self.fs.name == 'transient_nfs':
         misc.chmod(self.fs.mount_point, 0775)
     # Set the device ID
     cmd = "df %s | grep -v Filesystem | awk '{print $1}'" % self.fs.mount_point
     self.device = misc.getoutput(cmd)
     # If based on an archive, extract archive contents to the mount point
     if self.from_archive:
         # Do not overwrite an existing dir structure w/ the archive content.
         # This happens when a cluster is rebooted.
         if self.fs.name == 'galaxy' and os.path.exists(self.app.path_resolver.galaxy_home):
             log.debug("Galaxy home dir ({0}) already exists; not extracting "
                       "the archive ({1}) so not to overwrite it."
                       .format(self.app.path_resolver.galaxy_home, self.from_archive['url']))
             self.fs.nfs_share_and_set_state()
         else:
             self.fs.persistent = True
             self.fs.state = service_states.CONFIGURING
             # Extract the FS archive in a separate thread
             log.debug("Extracting transient FS {0} from an archive in a "
                       "dedicated thread.".format(self.get_full_name()))
             ExtractArchive(self.from_archive['url'], self.fs.mount_point,
                            self.from_archive['md5_sum'],
                            callback=self.fs.nfs_share_and_set_state).run()
     else:
         self.fs.nfs_share_and_set_state()
コード例 #4
0
ファイル: clouderamanager.py プロジェクト: inarihtam/cloudman
 def status(self):
     """
     Check and update the status of the service.

     Queries the ``cloudera-scm-server`` init script and maps its output
     onto ``self.state``; may reset the service to UNSTARTED to trigger a
     restart while start attempts remain.
     """
     # Do not probe while the service is mid-transition or deliberately
     # stopped; those states are managed elsewhere.
     if self.state == service_states.UNSTARTED or \
        self.state == service_states.STARTING or \
        self.state == service_states.SHUTTING_DOWN or \
        self.state == service_states.SHUT_DOWN or \
        self.state == service_states.WAITING_FOR_USER_ACTION:
         return
     # Capture possible status messages from /etc/init.d/cloudera-scm-server
     status_output = ['is dead and pid file exists',
                      'is dead and lock file exists',
                      'is not running',
                      'status is unknown']
     svc_status = misc.getoutput('service cloudera-scm-server status', quiet=True)
     for so in status_output:
         if so in svc_status:
             log.warning("Cloudera server not running: {0}.".format(so))
             if self.remaining_start_attempts > 0:
                 # Flag the service for another start attempt.
                 log.debug("Resetting ClouderaManager service")
                 self.state = service_states.UNSTARTED
             else:
                 log.error("Exceeded number of restart attempts; "
                           "ClouderaManager service in ERROR.")
                 self.state = service_states.ERROR
     if not self.started:
         pass
     elif 'is running' in svc_status:
         self.state = service_states.RUNNING
         # Once the service gets running, reset the number of start attempts
         self.remaining_start_attempts = NUM_START_ATTEMPTS
コード例 #5
0
ファイル: nginx.py プロジェクト: nuwang/cloudman
 def get_installed_version(self):
     """
     Return the installed nginx version as an ``(int major, int minor)`` tuple.

     Runs ``<exe> -v`` and parses the version banner from its output.
     Returns ``(0, 0)`` if the output cannot be parsed (previously this
     raised ``AttributeError`` on a failed match).
     """
     version_str = misc.getoutput("{0} -v".format(self.exe))
     # Raw string; escape the dot so it doesn't match any character, and
     # allow multi-digit major versions.
     m = re.search(r'nginx/(\d+)\.(\d+)', version_str)
     if not m:
         return 0, 0
     return int(m.group(1)), int(m.group(2))
コード例 #6
0
ファイル: proftpd.py プロジェクト: blankenberg/cloudman
 def configure_proftpd(self):
     """
     Configure environment for running ProFTPd service.

     Syncs the PostgreSQL ``galaxyftp`` role password, rewrites the
     ProFTPd config file from a template, prepares the FTP data dir, and
     (re)starts the proftpd daemon.

     :rtype: bool
     :return: ``True`` if proftpd started successfully, ``False`` otherwise.
     """
     log.debug("Configuring ProFTPd")
     # Because we're rewriting the proftpd config file below, update the
     # password for PostgreSQL galaxyftp user
     gftp_pwd = self.app.path_resolver.proftpd_galaxyftp_user_pwd
     log.debug("Setting psql password for galaxyftp role to {0}".format(gftp_pwd))
     # Check if galaxtftp role already exists
     cmd = ('{0} - postgres -c"{1} -p {2} -tAc\\\"SELECT 1 FROM pg_roles WHERE rolname=\'galaxyftp\'\\\""'
            .format(paths.P_SU, self.app.path_resolver.psql_cmd,
                    self.app.path_resolver.psql_db_port))
     # '1' means the role exists, so ALTER it; otherwise CREATE it.
     role = 'ALTER' if misc.getoutput(cmd) == '1' else 'CREATE'
     # Now either CREATE or ALTER the galaxyftp role to set the permissions
     cmd = ('{0} - postgres -c"{1} -p {4} -c\\\"{2} ROLE galaxyftp LOGIN PASSWORD \'{3}\'\\\""'
            .format(paths.P_SU, self.app.path_resolver.psql_cmd, role,
                    gftp_pwd, self.app.path_resolver.psql_db_port))
     misc.run(cmd)
     # Update the config to match the current environment
     proftpd_tmplt = conf_manager.PROFTPD_CONF_TEMPLATE
     proftpd_conf_template = conf_manager.load_conf_template(proftpd_tmplt)
     params = {
         'galaxy_user_name': paths.GALAXY_USER_NAME,
         'galaxyftp_user_name': 'galaxyftp',
         'psql_galaxyftp_password': gftp_pwd,
         'galaxy_db_port': self.app.path_resolver.psql_db_port,
         'galaxyFS_base_path': self.app.path_resolver.galaxy_data,
         'public_ip_address': self.app.cloud_interface.get_public_ip()
     }
     template = proftpd_conf_template.substitute(params)
     # Write out the config file (Python 2 print-to-file syntax)
     with open(self.app.path_resolver.proftpd_conf_file, 'w') as f:
         print >> f, template
     log.debug("Updated ProFTPd conf file {0}".format(
               self.app.path_resolver.proftpd_conf_file))
     # Place the FTP welcome message file
     urllib.urlretrieve("https://s3.amazonaws.com/cloudman/files/proftpd_welcome.txt",
                        "/usr/proftpd/etc/welcome_msg.txt")
     # Setup the Galaxy data dir for FTP
     ftp_data_dir = '%s/tmp/ftp' % self.app.path_resolver.galaxy_data
     if not os.path.exists(ftp_data_dir):
         os.makedirs(ftp_data_dir)
     attempt_chown_galaxy(ftp_data_dir)
     # Some images have vsFTPd server included so stop it first
     vsFTPd_exists = misc.run('status vsftpd', quiet=True)
     if vsFTPd_exists and 'start/running' in vsFTPd_exists:
         log.debug("Stopping vsFTPd")
         misc.run('stop vsftpd')
     # Start the server now
     if misc.run('/etc/init.d/proftpd start'):
         self.state = service_states.RUNNING
         return True
     else:
         log.error("Trouble starting ProFTPd")
         self.state = service_states.ERROR
         return False
コード例 #7
0
ファイル: cloudgene.py プロジェクト: nuwang/cloudman
 def status(self):
     """
     Check and update the status of the service.

     Queries the Cloudgene CLI status command and sets ``self.state``
     to ERROR or RUNNING accordingly.
     """
     # In a transitional/terminal state: leave the state alone.
     if self.state == service_states.UNSTARTED or \
        self.state == service_states.STARTING or \
        self.state == service_states.SHUTTING_DOWN or \
        self.state == service_states.SHUT_DOWN or \
        self.state == service_states.WAITING_FOR_USER_ACTION:
         pass
     elif 'not running' in misc.getoutput("cd {0};./cloudgene -a status".format(
          self.cg_home), quiet=True):
         log.error("Cloudgene server not running.")
         # BUG FIX: was `self.state == service_states.ERROR` -- a no-op
         # comparison, so the ERROR state was never actually recorded.
         self.state = service_states.ERROR
     else:
         self.state = service_states.RUNNING
コード例 #8
0
ファイル: cloudgene.py プロジェクト: blankenberg/cloudman
 def status(self):
     """
     Check and update the status of the service.

     Queries the Cloudgene CLI status command and sets ``self.state``
     to ERROR or RUNNING accordingly.
     """
     # In a transitional/terminal state: leave the state alone.
     if self.state == service_states.UNSTARTED or \
        self.state == service_states.STARTING or \
        self.state == service_states.SHUTTING_DOWN or \
        self.state == service_states.SHUT_DOWN or \
        self.state == service_states.WAITING_FOR_USER_ACTION:
         pass
     elif 'not running' in misc.getoutput("cd {0};./cloudgene -a status".format(
          self.cg_home), quiet=True):
         log.error("Cloudgene server not running.")
         # BUG FIX: was `self.state == service_states.ERROR` -- a no-op
         # comparison, so the ERROR state was never actually recorded.
         self.state = service_states.ERROR
     else:
         self.state = service_states.RUNNING
コード例 #9
0
 def status(self):
     """
     Update the status of this data service: make sure the mount point exists
     and that it is in /etc/exports for NFS
     """
     # log.debug("Checking the status of {0}".format(self.fs.mount_point))
     if self.fs._service_transitioning():
         # log.debug("Data service {0}
         # transitioning".format(self.fs.get_full_name()))
         pass
     elif self.fs._service_starting():
         # log.debug("Data service {0}
         # starting".format(self.fs.get_full_name()))
         pass
     elif not os.path.exists(self.fs.mount_point):
         # log.debug("Data service {0} dir {1} not there?".format(
         #           self.fs.get_full_name(), self.fs.mount_point))
         self.fs.state = service_states.UNSTARTED
     else:
         try:
             # Mount point is present and exported over NFS -> RUNNING.
             if NFSExport.find_mount_point_entry(self.fs.mount_point) > -1:
                 self.fs.state = service_states.RUNNING
                 # Transient storage needs to be special-cased because
                 # it's not a mounted disk per se but a disk on an
                 # otherwise default device for an instance (i.e., /mnt)
                 update_size_cmd = ("df --block-size 1 | grep /mnt$ | "
                                    "awk '{print $2, $3, $5}'")
                 # Some AWS instance types do not have transient storage
                 # and /mnt is just part of / so report that file system size
                 if not misc.getoutput(update_size_cmd, quiet=True):
                     update_size_cmd = ("df --block-size 1 | grep /$ | "
                                        "awk '{print $2, $3, $5}'")
                 self.fs._update_size(cmd=update_size_cmd)
             else:
                 # Or should this set it to UNSTARTED? Because this FS is just an
                 # NFS-exported file path...
                 log.warning(
                     "Data service {0} not found in /etc/exports; error!".
                     format(self.fs.get_full_name()))
                 self.fs.state = service_states.ERROR
         # Python 2 except syntax; any failure while inspecting the export
         # table or updating the FS size marks the service as ERROR.
         except Exception, e:
             log.error(
                 "Error checking the status of {0} service: {1}".format(
                     self.fs.get_full_name(), e))
             self.fs.state = service_states.ERROR
コード例 #10
0
ファイル: clouderamanager.py プロジェクト: martenson/cloudman
 def status(self):
     """
     Check and update the status of the service.
     """
     inactive_states = (service_states.UNSTARTED,
                        service_states.STARTING,
                        service_states.SHUTTING_DOWN,
                        service_states.SHUT_DOWN,
                        service_states.WAITING_FOR_USER_ACTION)
     if self.state in inactive_states:
         # Service is transitioning or deliberately stopped; nothing to do.
         return
     svc_output = misc.getoutput('service cloudera-scm-server status',
                                 quiet=True)
     if 'running' not in svc_output:
         log.error("Cloudera server not running!")
         self.state = service_states.ERROR
     elif self.started:
         self.state = service_states.RUNNING
コード例 #11
0
 def status(self):
     """
     Update the status of this data service: make sure the mount point exists
     and that it is in /etc/exports for NFS
     """
     # log.debug("Checking the status of {0}".format(self.fs.mount_point))
     if self.fs._service_transitioning():
         # log.debug("Data service {0}
         # transitioning".format(self.fs.get_full_name()))
         pass
     elif self.fs._service_starting():
         # log.debug("Data service {0}
         # starting".format(self.fs.get_full_name()))
         pass
     elif not os.path.exists(self.fs.mount_point):
         # log.debug("Data service {0} dir {1} not there?".format(
         #           self.fs.get_full_name(), self.fs.mount_point))
         self.fs.state = service_states.UNSTARTED
     else:
         try:
             # Mount point is present and exported over NFS -> RUNNING.
             if NFSExport.find_mount_point_entry(self.fs.mount_point) > -1:
                 self.fs.state = service_states.RUNNING
                 # Transient storage needs to be special-cased because
                 # it's not a mounted disk per se but a disk on an
                 # otherwise default device for an instance (i.e., /mnt)
                 update_size_cmd = ("df --block-size 1 | grep /mnt$ | "
                                    "awk '{print $2, $3, $5}'")
                 # Some AWS instance types do not have transient storage
                 # and /mnt is just part of / so report that file system size
                 if not misc.getoutput(update_size_cmd, quiet=True):
                     update_size_cmd = ("df --block-size 1 | grep /$ | "
                                        "awk '{print $2, $3, $5}'")
                 self.fs._update_size(cmd=update_size_cmd)
             else:
                 # Or should this set it to UNSTARTED? Because this FS is just an
                 # NFS-exported file path...
                 log.warning("Data service {0} not found in /etc/exports; error!"
                             .format(self.fs.get_full_name()))
                 self.fs.state = service_states.ERROR
         # Python 2 except syntax; any failure while inspecting the export
         # table or updating the FS size marks the service as ERROR.
         except Exception, e:
             log.error("Error checking the status of {0} service: {1}".format(
                 self.fs.get_full_name(), e))
             self.fs.state = service_states.ERROR
コード例 #12
0
ファイル: postgres.py プロジェクト: inarihtam/cloudman
    def check_postgres(self):
        """
        Check if PostgreSQL server is running and if `galaxy` database exists.

        :rtype: bool
        :return: ``True`` if the server is running and `galaxy` database exists,
                 ``False`` otherwise.
        """
        # Daemon not running -> no point querying for databases.
        if not self._check_daemon('postgres'):
            return False
        # List all database names and look for 'galaxy' among them.
        cmd = ('%s - postgres -c "%s/psql -p %s -c \\\"SELECT datname FROM PG_DATABASE;\\\" "'
               % (paths.P_SU, self.app.path_resolver.pg_home, self.psql_port))
        dbs = misc.getoutput(cmd, quiet=True)
        return 'galaxy' in dbs
コード例 #13
0
ファイル: nginx.py プロジェクト: kaktus42/cloudman
    def reconfigure(self, setup_ssl):
        """
        (Re)Generate Nginx configuration files and reload the server process.

        :type   setup_ssl: boolean
        :param  setup_ssl: if set, force HTTPS with a self-signed certificate.
        """
        if self.exe:
            log.debug("Updating Nginx config at {0}".format(self.conf_file))
            params = {}
            # Customize the appropriate nginx template
            # NOTE(review): a plain substring check on `nginx -v` output; a
            # banner containing e.g. "1.14" would also match -- confirm intent.
            if "1.4" in misc.getoutput("{0} -v".format(self.exe)):
                nginx_tmplt = conf_manager.NGINX_14_CONF_TEMPLATE
                params = {'galaxy_user_name': paths.GALAXY_USER_NAME,
                          'nginx_conf_dir': self.conf_dir}
                if setup_ssl:
                    log.debug("Using Nginx v1.4+ template w/ SSL")
                    # Generate a self-signed certificate
                    log.info("Generating self-signed certificate for SSL encryption")
                    cert_home = "/root/.ssh/"
                    certfile = os.path.join(cert_home, "instance_selfsigned_cert.pem")
                    keyfile = os.path.join(cert_home, "instance_selfsigned_key.pem")
                    misc.run("yes '' | openssl req -x509 -nodes -days 3650 -newkey "
                             "rsa:1024 -keyout " + keyfile + " -out " + certfile)
                    misc.run("chmod 440 " + keyfile)
                    server_tmplt = conf_manager.NGINX_SERVER_SSL
                    self.ssl_is_on = True
                else:
                    log.debug("Using Nginx v1.4+ template")
                    server_tmplt = conf_manager.NGINX_SERVER
                    self.ssl_is_on = False
            else:
                # Pre-1.4 nginx: a single monolithic conf file; no split
                # server block is written (server_tmplt stays empty).
                server_tmplt = ""
                nginx_tmplt = conf_manager.NGINX_CONF_TEMPLATE
                self.ssl_is_on = False
                params = {
                    'galaxy_user_name': paths.GALAXY_USER_NAME,
                    'galaxy_home': paths.P_GALAXY_HOME,
                    'galaxy_data': self.app.path_resolver.galaxy_data,
                }
                log.debug("Using Nginx pre-v1.4 template")
            # Write out the main nginx.conf file
            self._write_template_file(nginx_tmplt, params, self.conf_file)
            # Write out the default server block file
            if server_tmplt:
                # This means we're dealing with Nginx v1.4+ & split conf files
                upstream_servers = self._define_upstream_servers()
                params = {
                    'upstream_servers': upstream_servers,
                    'nginx_conf_dir': self.conf_dir
                }
                conf_file = os.path.join(self.conf_dir, 'sites-enabled', 'default.server')
                self._write_template_file(server_tmplt, params, conf_file)
                # Pulsar has it's own server config
                pulsar_svc = self.app.manager.service_registry.get_active('Pulsar')
                if pulsar_svc:
                    pulsar_tmplt = conf_manager.NGINX_SERVER_PULSAR
                    params = {'pulsar_port': pulsar_svc.pulsar_port}
                    conf_file = os.path.join(self.conf_dir, 'sites-enabled', 'pulsar.server')
                    self._write_template_file(pulsar_tmplt, params, conf_file)
                # Write out the location blocks for hosted services
                # Always include default locations (CloudMan, VNC, error)
                default_tmplt = conf_manager.NGINX_DEFAULT
                conf_file = os.path.join(self.conf_dir, 'sites-enabled', 'default.locations')
                self._write_template_file(default_tmplt, {}, conf_file)
                # Now add running services
                # Galaxy Reports
                reports_svc = self.app.manager.service_registry.get_active('GalaxyReports')
                reports_conf_file = os.path.join(self.conf_dir, 'sites-enabled', 'reports.locations')
                if reports_svc:
                    reports_tmplt = conf_manager.NGINX_GALAXY_REPORTS
                    params = {'reports_port': reports_svc.reports_port}
                    self._write_template_file(reports_tmplt, params, reports_conf_file)
                else:
                    # Service not active: remove any stale location block.
                    misc.delete_file(reports_conf_file)
                # Galaxy
                galaxy_svc = self.app.manager.service_registry.get_active('Galaxy')
                gxy_conf_file = os.path.join(self.conf_dir, 'sites-enabled', 'galaxy.locations')
                if galaxy_svc:
                    galaxy_tmplt = conf_manager.NGINX_GALAXY
                    params = {
                        'galaxy_home': paths.P_GALAXY_HOME,
                        'galaxy_data': self.app.path_resolver.galaxy_data
                    }
                    self._write_template_file(galaxy_tmplt, params, gxy_conf_file)
                else:
                    misc.delete_file(gxy_conf_file)
                # Cloudera Manager
                cmf_svc = self.app.manager.service_registry.get_active('ClouderaManager')
                cmf_conf_file = os.path.join(self.conf_dir, 'sites-enabled', 'cmf.locations')
                if cmf_svc:
                    cmf_tmplt = conf_manager.NGINX_CLOUDERA_MANAGER
                    self._write_template_file(cmf_tmplt, {}, cmf_conf_file)
                else:
                    misc.delete_file(cmf_conf_file)
                # Cloudgene
                cg_svc = self.app.manager.service_registry.get_active('Cloudgene')
                cg_conf_file = os.path.join(self.conf_dir, 'sites-enabled', 'cloudgene.locations')
                if cg_svc:
                    cg_tmplt = conf_manager.NGINX_CLOUDGENE
                    params = {'cg_port': cg_svc.port}
                    self._write_template_file(cg_tmplt, params, cg_conf_file)
                else:
                    misc.delete_file(cg_conf_file)
            self.reload()
        else:
            log.warning("Cannot find nginx executable to reload nginx config (got"
                        " '{0}')".format(self.exe))
コード例 #14
0
 def configure_proftpd(self):
     """
     Configure environment for running ProFTPd service.

     Syncs the PostgreSQL ``galaxyftp`` role password, rewrites the
     ProFTPd config file from a template, prepares the FTP data dir, and
     (re)starts the proftpd daemon.

     :rtype: bool
     :return: ``True`` if proftpd started successfully, ``False`` otherwise.
     """
     log.debug("Configuring ProFTPd")
     # Because we're rewriting the proftpd config file below, update the
     # password for PostgreSQL galaxyftp user
     gftp_pwd = self.app.path_resolver.proftpd_galaxyftp_user_pwd
     log.debug(
         "Setting psql password for galaxyftp role to {0}".format(gftp_pwd))
     # Check if galaxtftp role already exists
     cmd = (
         '{0} - postgres -c"{1} -p {2} -tAc\\\"SELECT 1 FROM pg_roles WHERE rolname=\'galaxyftp\'\\\""'
         .format(paths.P_SU, self.app.path_resolver.psql_cmd,
                 self.app.path_resolver.psql_db_port))
     # '1' means the role exists, so ALTER it; otherwise CREATE it.
     role = 'ALTER' if misc.getoutput(cmd) == '1' else 'CREATE'
     # Now either CREATE or ALTER the galaxyftp role to set the permissions
     cmd = (
         '{0} - postgres -c"{1} -p {4} -c\\\"{2} ROLE galaxyftp LOGIN PASSWORD \'{3}\'\\\""'
         .format(paths.P_SU, self.app.path_resolver.psql_cmd, role,
                 gftp_pwd, self.app.path_resolver.psql_db_port))
     misc.run(cmd)
     # Update the config to match the current environment
     proftpd_tmplt = conf_manager.PROFTPD_CONF_TEMPLATE
     proftpd_conf_template = conf_manager.load_conf_template(proftpd_tmplt)
     params = {
         'galaxy_user_name': paths.GALAXY_USER_NAME,
         'galaxyftp_user_name': 'galaxyftp',
         'psql_galaxyftp_password': gftp_pwd,
         'galaxy_db_port': self.app.path_resolver.psql_db_port,
         'galaxyFS_base_path': self.app.path_resolver.galaxy_data,
         'public_ip_address': self.app.cloud_interface.get_public_ip()
     }
     template = proftpd_conf_template.substitute(params)
     # Write out the config file (Python 2 print-to-file syntax)
     with open(self.app.path_resolver.proftpd_conf_file, 'w') as f:
         print >> f, template
     log.debug("Updated ProFTPd conf file {0}".format(
         self.app.path_resolver.proftpd_conf_file))
     # Place the FTP welcome message file
     urllib.urlretrieve(
         "https://s3.amazonaws.com/cloudman/files/proftpd_welcome.txt",
         "/usr/proftpd/etc/welcome_msg.txt")
     # Setup the Galaxy data dir for FTP
     ftp_data_dir = '%s/tmp/ftp' % self.app.path_resolver.galaxy_data
     if not os.path.exists(ftp_data_dir):
         os.makedirs(ftp_data_dir)
     attempt_chown_galaxy(ftp_data_dir)
     # Some images have vsFTPd server included so stop it first
     vsFTPd_exists = misc.run('status vsftpd', quiet=True)
     if vsFTPd_exists and 'start/running' in vsFTPd_exists:
         log.debug("Stopping vsFTPd")
         misc.run('stop vsftpd')
     # Start the server now
     if misc.run('/etc/init.d/proftpd start'):
         self.state = service_states.RUNNING
         return True
     else:
         log.error("Trouble starting ProFTPd")
         self.state = service_states.ERROR
         return False
コード例 #15
0
ファイル: nginx.py プロジェクト: ddavidovic/cloudman
    def reconfigure(self, setup_ssl):
        """
        (Re)Generate Nginx configuration files and reload the server process.

        :type   setup_ssl: boolean
        :param  setup_ssl: if set, force HTTPS with a self-signed certificate.
        """
        if self.exe:
            log.debug("Updating Nginx config at {0}".format(self.conf_file))
            params = {}
            # Customize the appropriate nginx template
            # NOTE(review): a plain substring check on `nginx -v` output; a
            # banner containing e.g. "1.14" would also match -- confirm intent.
            if "1.4" in misc.getoutput("{0} -v".format(self.exe)):
                nginx_tmplt = conf_manager.NGINX_14_CONF_TEMPLATE
                params = {
                    'galaxy_user_name': paths.GALAXY_USER_NAME,
                    'nginx_conf_dir': self.conf_dir
                }
                if setup_ssl:
                    log.debug("Using Nginx v1.4+ template w/ SSL")
                    # Generate a self-signed certificate
                    log.info(
                        "Generating self-signed certificate for SSL encryption"
                    )
                    cert_home = "/root/.ssh/"
                    certfile = os.path.join(cert_home,
                                            "instance_selfsigned_cert.pem")
                    keyfile = os.path.join(cert_home,
                                           "instance_selfsigned_key.pem")
                    misc.run(
                        "yes '' | openssl req -x509 -nodes -days 3650 -newkey "
                        "rsa:1024 -keyout " + keyfile + " -out " + certfile)
                    misc.run("chmod 440 " + keyfile)
                    server_tmplt = conf_manager.NGINX_SERVER_SSL
                    self.ssl_is_on = True
                else:
                    log.debug("Using Nginx v1.4+ template")
                    server_tmplt = conf_manager.NGINX_SERVER
                    self.ssl_is_on = False
            else:
                # Pre-1.4 nginx: a single monolithic conf file; no split
                # server block is written (server_tmplt stays empty).
                server_tmplt = ""
                nginx_tmplt = conf_manager.NGINX_CONF_TEMPLATE
                self.ssl_is_on = False
                params = {
                    'galaxy_user_name': paths.GALAXY_USER_NAME,
                    'galaxy_home': paths.P_GALAXY_HOME,
                    'galaxy_data': self.app.path_resolver.galaxy_data,
                }
                log.debug("Using Nginx pre-v1.4 template")
            # Write out the main nginx.conf file
            self._write_template_file(nginx_tmplt, params, self.conf_file)
            # Write out the default server block file
            if server_tmplt:
                # This means we're dealing with Nginx v1.4+ & split conf files
                upstream_servers = self._define_upstream_servers()
                params = {
                    'upstream_servers': upstream_servers,
                    'nginx_conf_dir': self.conf_dir
                }
                conf_file = os.path.join(self.conf_dir, 'sites-enabled',
                                         'default.server')
                self._write_template_file(server_tmplt, params, conf_file)
                # Pulsar has it's own server config
                pulsar_svc = self.app.manager.service_registry.get_active(
                    'Pulsar')
                if pulsar_svc:
                    pulsar_tmplt = conf_manager.NGINX_SERVER_PULSAR
                    params = {'pulsar_port': pulsar_svc.pulsar_port}
                    conf_file = os.path.join(self.conf_dir, 'sites-enabled',
                                             'pulsar.server')
                    self._write_template_file(pulsar_tmplt, params, conf_file)
                # Write out the location blocks for hosted services
                # Always include default locations (CloudMan, VNC, error)
                default_tmplt = conf_manager.NGINX_DEFAULT
                conf_file = os.path.join(self.conf_dir, 'sites-enabled',
                                         'default.locations')
                self._write_template_file(default_tmplt, {}, conf_file)
                # Now add running services
                # Galaxy Reports
                reports_svc = self.app.manager.service_registry.get_active(
                    'GalaxyReports')
                reports_conf_file = os.path.join(self.conf_dir,
                                                 'sites-enabled',
                                                 'reports.locations')
                if reports_svc:
                    reports_tmplt = conf_manager.NGINX_GALAXY_REPORTS
                    params = {'reports_port': reports_svc.reports_port}
                    self._write_template_file(reports_tmplt, params,
                                              reports_conf_file)
                else:
                    # Service not active: remove any stale location block.
                    misc.delete_file(reports_conf_file)
                # Galaxy
                galaxy_svc = self.app.manager.service_registry.get_active(
                    'Galaxy')
                gxy_conf_file = os.path.join(self.conf_dir, 'sites-enabled',
                                             'galaxy.locations')
                if galaxy_svc:
                    galaxy_tmplt = conf_manager.NGINX_GALAXY
                    params = {
                        'galaxy_home': paths.P_GALAXY_HOME,
                        'galaxy_data': self.app.path_resolver.galaxy_data
                    }
                    self._write_template_file(galaxy_tmplt, params,
                                              gxy_conf_file)
                else:
                    misc.delete_file(gxy_conf_file)
                # Cloudera Manager
                cmf_svc = self.app.manager.service_registry.get_active(
                    'ClouderaManager')
                cmf_conf_file = os.path.join(self.conf_dir, 'sites-enabled',
                                             'cmf.locations')
                if cmf_svc:
                    cmf_tmplt = conf_manager.NGINX_CLOUDERA_MANAGER
                    self._write_template_file(cmf_tmplt, {}, cmf_conf_file)
                else:
                    misc.delete_file(cmf_conf_file)
                # Cloudgene
                cg_svc = self.app.manager.service_registry.get_active(
                    'Cloudgene')
                cg_conf_file = os.path.join(self.conf_dir, 'sites-enabled',
                                            'cloudgene.locations')
                if cg_svc:
                    cg_tmplt = conf_manager.NGINX_CLOUDGENE
                    params = {'cg_port': cg_svc.port}
                    self._write_template_file(cg_tmplt, params, cg_conf_file)
                else:
                    misc.delete_file(cg_conf_file)
            # Reload the configuration if the process is running
            if self._check_daemon('nginx'):
                self.reload()
            else:
                log.debug("nginx process not running; did not reload config.")
        else:
            log.warning(
                "Cannot find nginx executable to reload nginx config (got"
                " '{0}')".format(self.exe))