示例#1
0
    def load_log(self, section):
        """Create the log directory and handler for *section*.

        Reads the logging settings from the corresponding section of
        self.config and registers a process-log handler via logSupport.

        @param section: name of the configuration section to read
        @return: tuple (logging.Logger for log_name, log_dir path)
        """
        # Mandatory settings: a missing key is a configuration error and
        # should propagate to the caller.
        log_name = str(self.config[section]["log_name"])
        extension = str(self.config[section]["extension"])
        msg_types = str(self.config[section]["msg_types"])
        max_days = float(self.config[section]["max_days"])
        min_days = float(self.config[section]["min_days"])
        max_mbytes = float(self.config[section]["max_mbytes"])

        # Optional settings: default when the key is absent. Catch only
        # the expected exceptions instead of a bare except, so unrelated
        # errors are no longer silently hidden.
        backupCount = 5
        try:
            backupCount = int(self.config[section]["backup_count"])
        except (KeyError, ValueError):
            pass  # backup_count may not exist in all sections

        compression = ''
        try:
            compression = str(self.config[section]["compression"])
        except KeyError:
            pass  # compression may not exist in all sections

        # os.path.join instead of manual "%s/%s"; tolerate a pre-existing
        # directory (the previous unconditional makedirs raised OSError
        # when the log directory was already there).
        log_dir = os.path.join(self.log_base_dir, log_name)
        if not os.path.isdir(log_dir):
            os.makedirs(log_dir)

        logSupport.add_processlog_handler(log_name,
                                          log_dir,
                                          msg_types,
                                          extension,
                                          max_days,
                                          min_days,
                                          max_mbytes,
                                          backupCount=backupCount,
                                          compression=compression)

        return logging.getLogger(log_name), log_dir
示例#2
0
    def load_log(self, section):
        """Create the log directory and handler for *section*.

        Reads the logging settings from the corresponding section of
        self.config and registers a process-log handler via logSupport.

        @param section: name of the configuration section to read
        @return: tuple (logging.Logger for log_name, log_dir path)
        """
        # Mandatory settings: a missing key is a configuration error and
        # should propagate to the caller.
        log_name = str(self.config[section]["log_name"])
        extension = str(self.config[section]["extension"])
        msg_types = str(self.config[section]["msg_types"])
        max_days = float(self.config[section]["max_days"])
        min_days = float(self.config[section]["min_days"])
        max_mbytes = float(self.config[section]["max_mbytes"])

        # Optional setting: default when the key is absent. Catch only
        # the expected exceptions instead of a bare except, so unrelated
        # errors are no longer silently hidden.
        backupCount = 5
        try:
            backupCount = int(self.config[section]["backup_count"])
        except (KeyError, ValueError):
            pass  # backup_count may not exist in all sections

        # os.path.join instead of manual "%s/%s"; tolerate a pre-existing
        # directory (the previous unconditional makedirs raised OSError
        # when the log directory was already there).
        log_dir = os.path.join(self.log_base_dir, log_name)
        if not os.path.isdir(log_dir):
            os.makedirs(log_dir)

        logSupport.add_processlog_handler(
            log_name,
            log_dir,
            msg_types,
            extension,
            max_days,
            min_days,
            max_mbytes,
            backupCount=backupCount)

        return logging.getLogger(log_name), log_dir
示例#3
0
def main(work_dir, action):
    """Frontend daemon entry point: set up logging from the frontend
    configuration, configure monitoring and register the pid file for
    *action*.

    @param work_dir: frontend working directory
    @param action: requested action, passed to pid registration
    """
    startup_time=time.time()

    # Make the descript file path absolute relative to the working dir
    glideinFrontendConfig.frontendConfig.frontend_descript_file = os.path.join(work_dir, glideinFrontendConfig.frontendConfig.frontend_descript_file)
    frontendDescript = glideinFrontendConfig.FrontendDescript(work_dir)

    # the log dir is shared between the frontend main and the groups, so use a subdir
    logSupport.log_dir = os.path.join(frontendDescript.data['LogDir'], "frontend")

    # Configure frontend process logging
    # NOTE(review): eval() of configuration data -- safe only as long as
    # the frontend descript file is trusted input
    process_logs = eval(frontendDescript.data['ProcessLogs']) 
    for plog in process_logs:
        logSupport.add_processlog_handler("frontend", logSupport.log_dir,
                                          plog['msg_types'], plog['extension'],
                                          int(float(plog['max_days'])),
                                          int(float(plog['min_days'])),
                                          int(float(plog['max_mbytes'])),
                                          int(float(plog['backup_count'])),
                                          plog['compression'])
    logSupport.log = logging.getLogger("frontend")
    logSupport.log.info("Logging initialized")
    logSupport.log.debug("Frontend startup time: %s" % str(startup_time))

    try:
        cleanup_environ()
        # we use a dedicated config... ignore the system-wide
        os.environ['CONDOR_CONFIG'] = frontendDescript.data['CondorConfig']

        sleep_time = int(frontendDescript.data['LoopDelay'])
        advertize_rate = int(frontendDescript.data['AdvertiseDelay'])
        max_parallel_workers = int(frontendDescript.data['GroupParallelWorkers'])
        restart_attempts = int(frontendDescript.data['RestartAttempts'])
        restart_interval = int(frontendDescript.data['RestartInterval'])


        # Python 2 idiom, equivalent to frontendDescript.data['Groups'].split(',')
        groups = string.split(frontendDescript.data['Groups'], ',')
        groups.sort()

        glideinFrontendMonitorAggregator.monitorAggregatorConfig.config_frontend(os.path.join(work_dir, "monitor"), groups)
    except:
        logSupport.log.exception("Exception occurred configuring monitoring: ")
        raise

    glideinFrontendMonitoring.write_frontend_descript_xml(frontendDescript, os.path.join(work_dir, 'monitor/'))

    logSupport.log.info("Enabled groups: %s" % groups)

    # create lock file
    pid_obj = glideinFrontendPidLib.FrontendPidSupport(work_dir)

    # start
    try:
        pid_obj.register(action)
    # Python 2 except syntax; another frontend instance holds the pid lock
    except  glideinFrontendPidLib.pidSupport.AlreadyRunning, err:
        pid_obj.load_registered()
        logSupport.log.exception("Failed starting Frontend with action %s. Instance with pid %s is aready running for action %s. Exception during pid registration: %s" % 
                                 (action, pid_obj.mypid , str(pid_obj.action_type), err))
        raise
def init_logs(name, log_dir, process_logs):
    """Register the process-log handlers for *name* and publish the logger.

    @param name: logger name (also used as the log file base name)
    @param log_dir: directory the log files are written into
    @param process_logs: iterable of dicts with 'msg_types', 'extension',
        'max_days', 'min_days' and 'max_mbytes' entries
    """
    for plog in process_logs:
        logSupport.add_processlog_handler(name, log_dir,
                                          plog['msg_types'], plog['extension'],
                                          int(float(plog['max_days'])),
                                          int(float(plog['min_days'])),
                                          int(float(plog['max_mbytes'])))
    # Hoisted out of the loop: the shared logger only needs to be fetched
    # and announced once, not once per configured handler.
    logSupport.log = logging.getLogger(name)
    logSupport.log.info("Logging initialized for %s" % name)
示例#5
0
def main(work_dir):
    """Frontend daemon entry point: configure logging and monitoring from
    the frontend configuration, register the pid file and run the spawn
    loop until interrupted.

    @param work_dir: frontend working directory
    """
    startup_time=time.time()

    # Make the descript file path absolute relative to the working dir
    glideinFrontendConfig.frontendConfig.frontend_descript_file = os.path.join(work_dir, glideinFrontendConfig.frontendConfig.frontend_descript_file)
    frontendDescript = glideinFrontendConfig.FrontendDescript(work_dir)

    # the log dir is shared between the frontend main and the groups, so use a subdir
    logSupport.log_dir = os.path.join(frontendDescript.data['LogDir'], "frontend")

    # Configure frontend process logging
    # NOTE(review): eval() of configuration data -- safe only as long as
    # the frontend descript file is trusted input
    process_logs = eval(frontendDescript.data['ProcessLogs']) 
    for plog in process_logs:
        logSupport.add_processlog_handler("frontend", logSupport.log_dir,
                                          plog['msg_types'], plog['extension'],
                                          int(float(plog['max_days'])),
                                          int(float(plog['min_days'])),
                                          int(float(plog['max_mbytes'])))
    logSupport.log = logging.getLogger("frontend")
    logSupport.log.info("Logging initialized")
    logSupport.log.debug("Frontend startup time: %s" % str(startup_time))

    try:
        cleanup_environ()
        # we use a dedicated config... ignore the system-wide
        os.environ['CONDOR_CONFIG'] = frontendDescript.data['CondorConfig']

        sleep_time = int(frontendDescript.data['LoopDelay'])
        advertize_rate = int(frontendDescript.data['AdvertiseDelay'])
        restart_attempts = int(frontendDescript.data['RestartAttempts'])
        restart_interval = int(frontendDescript.data['RestartInterval'])

        # Python 2 idiom, equivalent to frontendDescript.data['Groups'].split(',')
        groups = string.split(frontendDescript.data['Groups'], ',')
        groups.sort()

        glideinFrontendMonitorAggregator.monitorAggregatorConfig.config_frontend(os.path.join(work_dir, "monitor"), groups)
    except:
        logSupport.log.exception("Exception occurred configuring monitoring: ")
        raise

    glideinFrontendMonitoring.write_frontend_descript_xml(frontendDescript, os.path.join(work_dir, 'monitor/'))

    # create lock file
    pid_obj = glideinFrontendPidLib.FrontendPidSupport(work_dir)

    # start
    pid_obj.register()
    try:
        try:
            spawn(sleep_time, advertize_rate, work_dir,
                  frontendDescript, groups, restart_attempts, restart_interval)
        except KeyboardInterrupt:
            logSupport.log.info("Received signal...exit")
        except:
            logSupport.log.exception("Exception occurred trying to spawn: ")
    finally:
        # always release the pid lock on the way out
        pid_obj.relinquish()
def init_logs(name, log_dir, process_logs):
    """Register the process-log handlers for *name* and publish the logger.

    @param name: logger name (also used as the log file base name)
    @param log_dir: directory the log files are written into
    @param process_logs: iterable of dicts with 'msg_types', 'extension',
        'max_days', 'min_days', 'max_mbytes', 'backup_count' and
        'compression' entries
    """
    for plog in process_logs:
        logSupport.add_processlog_handler(name, log_dir, plog['msg_types'],
                                          plog['extension'],
                                          int(float(plog['max_days'])),
                                          int(float(plog['min_days'])),
                                          int(float(plog['max_mbytes'])),
                                          int(float(plog['backup_count'])),
                                          plog['compression'])
    # Hoisted out of the loop: the shared logger only needs to be fetched
    # and announced once, not once per configured handler.
    logSupport.log = logging.getLogger(name)
    logSupport.log.info("Logging initialized for %s" % name)
示例#7
0
    def run(self):
        """Configure process logging for the tester and publish the logger.

        Falls back to the module-level _default_* values for any setting
        not supplied on the instance.
        """
        import logging
        from glideinwms.lib import logSupport

        logger_name = self.logger_name or _default_logger_name
        directory = self.directory or _default_directory
        extension = self.extension or _default_extension
        levels = self.levels or _default_levels
        max_days = self.maxDays or _default_max_days
        min_days = self.minDays or _default_min_days
        # NOTE(review): this reads self.maxDays again rather than a
        # max-size attribute -- looks like a copy-paste bug; confirm the
        # intended attribute (e.g. self.maxSize) before relying on it.
        max_size = self.maxDays or _default_max_size

        logSupport.add_processlog_handler(logger_name, directory, levels,
                                          extension, max_days, min_days,
                                          max_size)
        logSupport.log = logging.getLogger(self.logger_name)
        # NOTE(review): bound to a local, so this assignment is discarded
        # when run() returns -- presumably meant to update a module global
        _external_logger = logging.getLogger(self.logger_name)
        logSupport.log.info("GlideTester logging initialized.")
示例#8
0
    def run(self):
        """Configure process logging for the tester and publish the logger.

        Falls back to the module-level _default_* values for any setting
        not supplied on the instance.
        """
        import logging
        from glideinwms.lib import logSupport

        logger_name = self.logger_name or _default_logger_name
        directory = self.directory or _default_directory
        extension = self.extension or _default_extension
        levels = self.levels or _default_levels
        max_days = self.maxDays or _default_max_days
        min_days = self.minDays or _default_min_days
        # NOTE(review): this reads self.maxDays again rather than a
        # max-size attribute -- looks like a copy-paste bug; confirm the
        # intended attribute (e.g. self.maxSize) before relying on it.
        max_size = self.maxDays or _default_max_size

        logSupport.add_processlog_handler(logger_name, directory,
                                        levels, extension,
                                        max_days, min_days, 
                                        max_size
        )
        logSupport.log = logging.getLogger(self.logger_name)
        # NOTE(review): bound to a local, so this assignment is discarded
        # when run() returns -- presumably meant to update a module global
        _external_logger = logging.getLogger(self.logger_name)
        logSupport.log.info("GlideTester logging initialized.")
示例#9
0
def main(work_dir, action):
    """Frontend daemon entry point.

    Sets up logging and monitoring from the frontend configuration,
    registers the pid file for *action*, then either spawns the group
    worker loop ("run") or performs a glidein-removal pass (remove*).

    @param work_dir: frontend working directory
    @param action: "run" or one of the remove* actions
    """
    startup_time = time.time()

    # Make the descript file path absolute relative to the working dir
    glideinFrontendConfig.frontendConfig.frontend_descript_file = os.path.join(work_dir, glideinFrontendConfig.frontendConfig.frontend_descript_file)
    frontendDescript = glideinFrontendConfig.FrontendDescript(work_dir)

    # the log dir is shared between the frontend main and the groups, so use a subdir
    logSupport.log_dir = os.path.join(frontendDescript.data['LogDir'],
                                      "frontend")

    # Configure frontend process logging
    # NOTE(review): eval() of configuration data -- safe only as long as
    # the frontend descript file is trusted input
    process_logs = eval(frontendDescript.data['ProcessLogs']) 
    for plog in process_logs:
        logSupport.add_processlog_handler("frontend", logSupport.log_dir,
                                          plog['msg_types'], plog['extension'],
                                          int(float(plog['max_days'])),
                                          int(float(plog['min_days'])),
                                          int(float(plog['max_mbytes'])),
                                          int(float(plog['backup_count'])),
                                          plog['compression'])
    logSupport.log = logging.getLogger("frontend")
    logSupport.log.info("Logging initialized")
    logSupport.log.debug("Frontend startup time: %s" % str(startup_time))

    try:
        cleanup_environ()
        # we use a dedicated config... ignore the system-wide
        os.environ['CONDOR_CONFIG'] = frontendDescript.data['CondorConfig']

        sleep_time = int(frontendDescript.data['LoopDelay'])
        advertize_rate = int(frontendDescript.data['AdvertiseDelay'])
        max_parallel_workers = int(frontendDescript.data['GroupParallelWorkers'])
        restart_attempts = int(frontendDescript.data['RestartAttempts'])
        restart_interval = int(frontendDescript.data['RestartInterval'])


        groups = sorted(frontendDescript.data['Groups'].split(','))

        glideinFrontendMonitorAggregator.config_frontend(os.path.join(work_dir, "monitor"), groups)
    except:
        logSupport.log.exception("Exception occurred configuring monitoring: ")
        raise

    glideinFrontendMonitoring.write_frontend_descript_xml(
        frontendDescript, os.path.join(work_dir, 'monitor/'))

    logSupport.log.info("Enabled groups: %s" % groups)

    # create lock file
    pid_obj = glideinFrontendPidLib.FrontendPidSupport(work_dir)

    # start
    try:
        pid_obj.register(action)
    # Another frontend instance holds the pid lock for this work_dir
    except  glideinFrontendPidLib.pidSupport.AlreadyRunning as err:
        pid_obj.load_registered()
        logSupport.log.exception("Failed starting Frontend with action %s. Instance with pid %s is aready running for action %s. Exception during pid registration: %s" % (action, pid_obj.mypid, str(pid_obj.action_type), err))
        raise

    try:
        try:
            if action == "run":
                spawn(sleep_time, advertize_rate, work_dir,
                      frontendDescript, groups, max_parallel_workers,
                      restart_interval, restart_attempts)
            elif action in ('removeWait', 'removeIdle', 'removeAll', 'removeWaitExcess', 'removeIdleExcess', 'removeAllExcess'):
                spawn_removal(work_dir, frontendDescript, groups,
                              max_parallel_workers, action)
            else:
                raise ValueError("Unknown action: %s" % action)
        except KeyboardInterrupt:
            logSupport.log.info("Received signal...exit")
        except HUPException:
            # SIGHUP: release the pid lock and re-exec the reconfig tool,
            # which restarts the frontend with the new configuration
            logSupport.log.info("Received SIGHUP, reload config")
            pid_obj.relinquish()
            os.execv( os.path.join(FRONTEND_DIR, "../creation/reconfig_frontend"), ['reconfig_frontend', '-sighupreload', '-xml', '/etc/gwms-frontend/frontend.xml'] )
        except:
            logSupport.log.exception("Exception occurred trying to spawn: ")
    finally:
        # always release the pid lock on the way out
        pid_obj.relinquish()
示例#10
0
def main(startup_dir):
    """
    Reads in the configuration file and starts up the factory

    @type startup_dir: String
    @param startup_dir: Path to glideinsubmit directory
    """

    # Force integrity checks on all condor operations
    glideFactoryLib.set_condor_integrity_checks()

    glideFactoryInterface.factoryConfig.lock_dir = os.path.join(startup_dir,
                                                                "lock")

    glideFactoryConfig.factoryConfig.glidein_descript_file = \
        os.path.join(startup_dir,
                     glideFactoryConfig.factoryConfig.glidein_descript_file)
    glideinDescript = glideFactoryConfig.GlideinDescript()
    frontendDescript = glideFactoryConfig.FrontendDescript()

    # set factory_collector at a global level, since we do not expect it to change
    glideFactoryInterface.factoryConfig.factory_collector = glideinDescript.data['FactoryCollector']

    # Setup the glideFactoryLib.factoryConfig so that we can process the
    # globals classads
    glideFactoryLib.factoryConfig.config_whoamI(
        glideinDescript.data['FactoryName'],
        glideinDescript.data['GlideinName'])
    glideFactoryLib.factoryConfig.config_dirs(
        startup_dir, glideinDescript.data['LogDir'],
        glideinDescript.data['ClientLogBaseDir'],
        glideinDescript.data['ClientProxiesBaseDir'])

    # Set the Log directory
    logSupport.log_dir = os.path.join(glideinDescript.data['LogDir'], "factory")

    # Configure factory process logging
    # NOTE(review): eval() of configuration data -- safe only as long as
    # the glidein descript file is trusted input
    process_logs = eval(glideinDescript.data['ProcessLogs'])
    for plog in process_logs:
        # ADMIN-type logs get a dedicated logger with a fixed level set
        if 'ADMIN' in plog['msg_types'].upper():
            logSupport.add_processlog_handler("factoryadmin",
                                              logSupport.log_dir,
                                              "DEBUG,INFO,WARN,ERR",
                                              plog['extension'],
                                              int(float(plog['max_days'])),
                                              int(float(plog['min_days'])),
                                              int(float(plog['max_mbytes'])),
                                              int(float(plog['backup_count'])),
                                              plog['compression'])
        else:
            logSupport.add_processlog_handler("factory",
                                              logSupport.log_dir,
                                              plog['msg_types'],
                                              plog['extension'],
                                              int(float(plog['max_days'])),
                                              int(float(plog['min_days'])),
                                              int(float(plog['max_mbytes'])),
                                              int(float(plog['backup_count'])),
                                              plog['compression'])
    logSupport.log = logging.getLogger("factory")
    logSupport.log.info("Logging initialized")

    if (glideinDescript.data['Entries'].strip() in ('', ',')):
        # No entries are enabled. There is nothing to do. Just exit here.
        log_msg = "No Entries are enabled. Exiting."

        logSupport.log.error(log_msg)
        sys.exit(1)

    write_descript(glideinDescript,frontendDescript,os.path.join(startup_dir, 'monitor/'))

    try:
        os.chdir(startup_dir)
    except:
        logSupport.log.exception("Failed starting Factory. Unable to change to startup_dir: ")
        raise

    try:
        # Rotate the RSA key when it is older than the grace time,
        # otherwise keep using the current one
        if (is_file_old(glideinDescript.default_rsakey_fname,
                        int(glideinDescript.data['OldPubKeyGraceTime']))):
            # First backup and load any existing key
            logSupport.log.info("Backing up and loading old key")
            glideinDescript.backup_and_load_old_key()
            # Create a new key for this run
            logSupport.log.info("Recreating and loading new key")
            glideinDescript.load_pub_key(recreate=True)
        else:
            # Key is recent enough. Just reuse it.
            logSupport.log.info("Key is recent enough, reusing for this run")
            glideinDescript.load_pub_key(recreate=False)
            logSupport.log.info("Loading old key")
            glideinDescript.load_old_rsa_key()
    except:
        logSupport.log.exception("Failed starting Factory. Exception occurred loading factory keys: ")
        raise

    glideFactoryMonitorAggregator.glideFactoryMonitoring.monitoringConfig.my_name = "%s@%s" % (glideinDescript.data['GlideinName'],
               glideinDescript.data['FactoryName'])

    glideFactoryInterface.factoryConfig.advertise_use_tcp = (glideinDescript.data['AdvertiseWithTCP'] in ('True', '1'))
    glideFactoryInterface.factoryConfig.advertise_use_multi = (glideinDescript.data['AdvertiseWithMultiple'] in ('True', '1'))
    sleep_time = int(glideinDescript.data['LoopDelay'])
    advertize_rate = int(glideinDescript.data['AdvertiseDelay'])
    restart_attempts = int(glideinDescript.data['RestartAttempts'])
    restart_interval = int(glideinDescript.data['RestartInterval'])

    try:
        # Best effort: a version lookup failure is logged but not fatal
        glideinwms_dir = os.path.dirname(os.path.dirname(sys.argv[0]))
        glideFactoryInterface.factoryConfig.glideinwms_version = glideinWMSVersion.GlideinWMSDistro(glideinwms_dir, 'checksum.factory').version()
    except:
        logSupport.log.exception("Non critical Factory error. Exception occurred while trying to retrieve the glideinwms version: ")

    entries = glideinDescript.data['Entries'].split(',')
    entries.sort()

    glideFactoryMonitorAggregator.monitorAggregatorConfig.config_factory(
        os.path.join(startup_dir, "monitor"), entries,
        log = logSupport.log)

    # create lock file
    pid_obj = glideFactoryPidLib.FactoryPidSupport(startup_dir)

    increase_process_limit()

    # start
    try:
        pid_obj.register()
    # Python 2 except syntax; another factory instance holds the pid lock
    except glideFactoryPidLib.pidSupport.AlreadyRunning, err:
        pid_obj.load_registered()
        logSupport.log.exception("Failed starting Factory. Instance with pid %s is aready running. Exception during pid registration: %s" % 
                                 (pid_obj.mypid , err))
        raise
示例#11
0
def main(startup_dir):
    """
    Reads in the configuration file and starts up the factory

    @type startup_dir: String
    @param startup_dir: Path to glideinsubmit directory
    """
    # Force integrity checks on all condor operations
    glideFactoryLib.set_condor_integrity_checks()

    glideFactoryInterface.factoryConfig.lock_dir = os.path.join(startup_dir,
                                                                "lock")
    glideFactoryConfig.factoryConfig.glidein_descript_file = \
        os.path.join(startup_dir,
                     glideFactoryConfig.factoryConfig.glidein_descript_file)
    glideinDescript = glideFactoryConfig.GlideinDescript()
    frontendDescript = glideFactoryConfig.FrontendDescript()

    # set factory_collector at a global level, since we do not expect it to change
    glideFactoryInterface.factoryConfig.factory_collector = glideinDescript.data['FactoryCollector']

    # Setup the glideFactoryLib.factoryConfig so that we can process the
    # globals classads
    glideFactoryLib.factoryConfig.config_whoamI(
        glideinDescript.data['FactoryName'],
        glideinDescript.data['GlideinName'])
    glideFactoryLib.factoryConfig.config_dirs(
        startup_dir, glideinDescript.data['LogDir'],
        glideinDescript.data['ClientLogBaseDir'],
        glideinDescript.data['ClientProxiesBaseDir'])

    # Set the Log directory
    logSupport.log_dir = os.path.join(glideinDescript.data['LogDir'], "factory")

    # Configure factory process logging
    # NOTE(review): eval() of configuration data -- safe only as long as
    # the glidein descript file is trusted input
    process_logs = eval(glideinDescript.data['ProcessLogs'])
    for plog in process_logs:
        # ADMIN-type logs get a dedicated logger with a fixed level set
        if 'ADMIN' in plog['msg_types'].upper():
            logSupport.add_processlog_handler("factoryadmin",
                                              logSupport.log_dir,
                                              "DEBUG,INFO,WARN,ERR",
                                              plog['extension'],
                                              int(float(plog['max_days'])),
                                              int(float(plog['min_days'])),
                                              int(float(plog['max_mbytes'])),
                                              int(float(plog['backup_count'])),
                                              plog['compression'])
        else:
            logSupport.add_processlog_handler("factory",
                                              logSupport.log_dir,
                                              plog['msg_types'],
                                              plog['extension'],
                                              int(float(plog['max_days'])),
                                              int(float(plog['min_days'])),
                                              int(float(plog['max_mbytes'])),
                                              int(float(plog['backup_count'])),
                                              plog['compression'])
    logSupport.log = logging.getLogger("factory")
    logSupport.log.info("Logging initialized")

    if (glideinDescript.data['Entries'].strip() in ('', ',')):
        # No entries are enabled. There is nothing to do. Just exit here.
        log_msg = "No Entries are enabled. Exiting."

        logSupport.log.error(log_msg)
        sys.exit(1)

    write_descript(glideinDescript, frontendDescript, os.path.join(startup_dir, 'monitor/'))

    try:
        os.chdir(startup_dir)
    except:
        logSupport.log.exception("Failed starting Factory. Unable to change to startup_dir: ")
        raise

    try:
        # Rotate the RSA key when it is older than the grace time,
        # otherwise keep using the current one
        if (is_file_old(glideinDescript.default_rsakey_fname,
                        int(glideinDescript.data['OldPubKeyGraceTime']))):
            # First backup and load any existing key
            logSupport.log.info("Backing up and loading old key")
            glideinDescript.backup_and_load_old_key()
            # Create a new key for this run
            logSupport.log.info("Recreating and loading new key")
            glideinDescript.load_pub_key(recreate=True)
        else:
            # Key is recent enough. Just reuse it.
            logSupport.log.info("Key is recent enough, reusing for this run")
            glideinDescript.load_pub_key(recreate=False)
            logSupport.log.info("Loading old key")
            glideinDescript.load_old_rsa_key()
    except RSAError as e:
        logSupport.log.exception("Failed starting Factory. Exception occurred loading factory keys: ")
        # Give the operator actionable hints when the key itself is bad;
        # key_fname/cwd may not be present on every RSAError instance
        key_fname = getattr(e, 'key_fname', None)
        cwd = getattr(e, 'cwd', None)
        if key_fname and cwd:
            logSupport.log.error("Failed to load RSA key %s with current working direcotry %s", key_fname, cwd)
            logSupport.log.error("If you think the rsa key might be corrupted, try to remove it, and then reconfigure the factory to recreate it")
        raise
    except IOError as ioe:
        logSupport.log.exception("Failed starting Factory. Exception occurred loading factory keys: ")
        # errno 2 == ENOENT: the key file is simply missing
        if ioe.filename == 'rsa.key' and ioe.errno == 2:
             logSupport.log.error("Missing rsa.key file. Please, reconfigure the factory to recreate it")
        raise
    except:
        logSupport.log.exception("Failed starting Factory. Exception occurred loading factory keys: ")
        raise

    glideFactoryMonitorAggregator.glideFactoryMonitoring.monitoringConfig.my_name = "%s@%s" % (glideinDescript.data['GlideinName'],
               glideinDescript.data['FactoryName'])

    glideFactoryInterface.factoryConfig.advertise_use_tcp = (glideinDescript.data['AdvertiseWithTCP'] in ('True', '1'))
    glideFactoryInterface.factoryConfig.advertise_use_multi = (glideinDescript.data['AdvertiseWithMultiple'] in ('True', '1'))
    sleep_time = int(glideinDescript.data['LoopDelay'])
    advertize_rate = int(glideinDescript.data['AdvertiseDelay'])
    restart_attempts = int(glideinDescript.data['RestartAttempts'])
    restart_interval = int(glideinDescript.data['RestartInterval'])

    try:
        # Best effort: a version lookup failure is logged but not fatal
        glideinwms_dir = os.path.dirname(os.path.dirname(sys.argv[0]))
        glideFactoryInterface.factoryConfig.glideinwms_version = glideinWMSVersion.GlideinWMSDistro(glideinwms_dir, 'checksum.factory').version()
    except:
        logSupport.log.exception("Non critical Factory error. Exception occurred while trying to retrieve the glideinwms version: ")

    entries = sorted(glideinDescript.data['Entries'].split(','))

    glideFactoryMonitorAggregator.monitorAggregatorConfig.config_factory(
        os.path.join(startup_dir, "monitor"), entries,
        log=logSupport.log
    )

    # create lock file
    pid_obj = glideFactoryPidLib.FactoryPidSupport(startup_dir)

    increase_process_limit()

    # start
    try:
        pid_obj.register()
    # Another factory instance holds the pid lock for this startup_dir
    except glideFactoryPidLib.pidSupport.AlreadyRunning as err:
        pid_obj.load_registered()
        logSupport.log.exception("Failed starting Factory. Instance with pid %s is aready running. Exception during pid registration: %s" %
                                 (pid_obj.mypid, err))
        raise
    try:
        try:
            spawn(sleep_time, advertize_rate, startup_dir, glideinDescript,
                  frontendDescript, entries, restart_attempts, restart_interval)
        except KeyboardInterrupt as e:
            raise e
        except HUPException as e:
            # inside spawn(), outermost try will catch HUPException, 
            # then the code within the finally will run
            # which will terminate glideFactoryEntryGroup children processes
            # and then the following 3 lines will be executed.
            logSupport.log.info("Received SIGHUP, reload config uid = %d" % os.getuid())
            # must empty the lock file so that when the thread returns from reconfig_glidein and 
            # begins from the beginning, it will not error out which will happen 
            # if the lock file is not empty
            pid_obj.relinquish()
            os.execv(os.path.join(FACTORY_DIR, "../creation/reconfig_glidein"),
                     ['reconfig_glidein', '-update_scripts', 'no', '-sighupreload', '-xml', '/etc/gwms-factory/glideinWMS.xml'])
        except:
            logSupport.log.exception("Exception occurred spawning the factory: ")
    finally:
        # always release the pid lock on the way out
        pid_obj.relinquish()
def main(parent_pid, work_dir, group_name):
    """Entry point for a frontend group element process.

    Configures group-level logging, loads the merged group configuration,
    selects the proxy plugin, sets up the Condor/GSI environment,
    registers the element pid file and runs the iterate() work loop
    until interrupted.

    @param parent_pid: pid of the parent frontend process
    @param work_dir: frontend working directory
    @param group_name: name of the group this element serves
    @return: 1 on invalid ProxySelectionPlugin configuration
    """
    startup_time = time.time()

    elementDescript = glideinFrontendConfig.ElementMergedDescript(work_dir, group_name)

    # the log dir is shared between the frontend main and the groups, so use a subdir
    logSupport.log_dir = os.path.join(elementDescript.frontend_data['LogDir'], "group_%s" % group_name)

    # Configure frontend group process logging
    # NOTE(review): eval() of configuration data -- safe only as long as
    # the frontend descript file is trusted input
    process_logs = eval(elementDescript.frontend_data['ProcessLogs'])
    for plog in process_logs:
        logSupport.add_processlog_handler(group_name, logSupport.log_dir, plog['msg_types'], plog['extension'],
                                      int(float(plog['max_days'])),
                                      int(float(plog['min_days'])),
                                      int(float(plog['max_mbytes'])))
    logSupport.log = logging.getLogger(group_name)
    logSupport.log.info("Logging initialized")
    logSupport.log.debug("Frontend Element startup time: %s" % str(startup_time))

    paramsDescript = glideinFrontendConfig.ParamsDescript(work_dir, group_name)
    attrsDescript = glideinFrontendConfig.AttrsDescript(work_dir, group_name)
    signatureDescript = glideinFrontendConfig.GroupSignatureDescript(work_dir, group_name)
    #
    # We decided we will not use the data from the stage area
    # Leaving it commented in the code, in case we decide in the future
    #  it was a good validation of the Web server health
    #
    #stageArea=glideinFrontendConfig.MergeStageFiles(elementDescript.frontend_data['WebURL'],
    #                                                signatureDescript.signature_type,
    #                                                signatureDescript.frontend_descript_fname,signatureDescript.frontend_descript_signature,
    #                                                group_name,
    #                                                signatureDescript.group_descript_fname,signatureDescript.group_descript_signature)
    # constsDescript=stageArea.get_constants()
    #

    attr_dict = attrsDescript.data

    glideinFrontendMonitoring.monitoringConfig.monitor_dir = os.path.join(work_dir, "monitor/group_%s" % group_name)

    glideinFrontendInterface.frontendConfig.advertise_use_tcp = (elementDescript.frontend_data['AdvertiseWithTCP'] in ('True', '1'))
    glideinFrontendInterface.frontendConfig.advertise_use_multi = (elementDescript.frontend_data['AdvertiseWithMultiple'] in ('True', '1'))

    try:
        # Best effort: a version lookup failure is logged but not fatal
        glideinwms_dir = os.path.dirname(os.path.dirname(sys.argv[0]))
        glideinFrontendInterface.frontendConfig.glideinwms_version = glideinWMSVersion.GlideinWMSDistro(glideinwms_dir, 'checksum.frontend').version()
    except:
        logSupport.log.exception("Exception occurred while trying to retrieve the glideinwms version: ")

    if len(elementDescript.merged_data['Proxies']) > 0:
        if not glideinFrontendPlugins.proxy_plugins.has_key(elementDescript.merged_data['ProxySelectionPlugin']):
            # BUGFIX: the supported-plugin list used to be passed as a
            # second positional argument to warning() instead of into the
            # % tuple, which raised "not enough arguments for format
            # string" whenever this branch was taken
            logSupport.log.warning("Invalid ProxySelectionPlugin '%s', supported plugins are %s" %
                                   (elementDescript.merged_data['ProxySelectionPlugin'],
                                    glideinFrontendPlugins.proxy_plugins.keys()))
            return 1
        x509_proxy_plugin = glideinFrontendPlugins.proxy_plugins[elementDescript.merged_data['ProxySelectionPlugin']](os.path.join(work_dir, "group_%s" % group_name), glideinFrontendPlugins.createCredentialList(elementDescript))
    else:
        # no proxies, will try to use the factory one
        x509_proxy_plugin = None

    # set the condor configuration and GSI setup globally, so I don't need to worry about it later on
    os.environ['CONDOR_CONFIG'] = elementDescript.frontend_data['CondorConfig']
    os.environ['_CONDOR_CERTIFICATE_MAPFILE'] = elementDescript.element_data['MapFile']
    os.environ['X509_USER_PROXY'] = elementDescript.frontend_data['ClassAdProxy']

    # create lock file
    pid_obj = glideinFrontendPidLib.ElementPidSupport(work_dir, group_name)

    pid_obj.register(parent_pid)
    try:
        try:
            logSupport.log.info("Starting up")
            iterate(parent_pid, elementDescript, paramsDescript, attr_dict, signatureDescript, x509_proxy_plugin)
        except KeyboardInterrupt:
            logSupport.log.info("Received signal...exit")
        except:
            logSupport.log.exception("Unhandled exception, dying: ")
    finally:
        # always release the pid lock on the way out
        pid_obj.relinquish()
示例#13
0
def main(work_dir, action):
    """Entry point for the frontend daemon process.

    Sets up process logging, loads the frontend description, configures
    monitoring, registers the pid lock file, and dispatches the requested
    action.

    Args:
        work_dir (str): frontend working directory (configuration, locks,
            and the monitor subdirectory live under it)
        action (str): "run" to start the main loop, or one of
            removeWait/removeIdle/removeAll (optionally with the "Excess"
            suffix) to trigger a glidein-removal pass

    Raises:
        Exception: re-raised after logging if monitoring configuration or
            pid registration fails; ValueError for an unknown action.
    """
    startup_time = time.time()

    glideinFrontendConfig.frontendConfig.frontend_descript_file = os.path.join(
        work_dir, glideinFrontendConfig.frontendConfig.frontend_descript_file)
    frontendDescript = glideinFrontendConfig.FrontendDescript(work_dir)

    # the log dir is shared between the frontend main and the groups,
    # so use a dedicated subdir for the main process
    logSupport.log_dir = os.path.join(frontendDescript.data['LogDir'],
                                      "frontend")

    # Configure frontend process logging.
    # NOTE(review): eval() is only acceptable here because ProcessLogs comes
    # from the locally generated frontend description file; never feed it
    # untrusted input.
    process_logs = eval(frontendDescript.data['ProcessLogs'])
    for plog in process_logs:
        logSupport.add_processlog_handler("frontend", logSupport.log_dir,
                                          plog['msg_types'], plog['extension'],
                                          int(float(plog['max_days'])),
                                          int(float(plog['min_days'])),
                                          int(float(plog['max_mbytes'])),
                                          int(float(plog['backup_count'])),
                                          plog['compression'])
    logSupport.log = logging.getLogger("frontend")
    logSupport.log.info("Logging initialized")
    logSupport.log.debug("Frontend startup time: %s" % str(startup_time))

    try:
        cleanup_environ()
        # we use a dedicated config... ignore the system-wide one
        os.environ['CONDOR_CONFIG'] = frontendDescript.data['CondorConfig']

        sleep_time = int(frontendDescript.data['LoopDelay'])
        advertize_rate = int(frontendDescript.data['AdvertiseDelay'])
        max_parallel_workers = int(frontendDescript.data['GroupParallelWorkers'])
        restart_attempts = int(frontendDescript.data['RestartAttempts'])
        restart_interval = int(frontendDescript.data['RestartInterval'])

        groups = sorted(frontendDescript.data['Groups'].split(','))

        glideinFrontendMonitorAggregator.monitorAggregatorConfig.config_frontend(
            os.path.join(work_dir, "monitor"), groups)
    except Exception:
        # narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are not logged as config errors before the re-raise
        logSupport.log.exception("Exception occurred configuring monitoring: ")
        raise

    glideinFrontendMonitoring.write_frontend_descript_xml(
        frontendDescript, os.path.join(work_dir, 'monitor/'))

    logSupport.log.info("Enabled groups: %s" % groups)

    # create lock file
    pid_obj = glideinFrontendPidLib.FrontendPidSupport(work_dir)

    # start
    try:
        pid_obj.register(action)
    except glideinFrontendPidLib.pidSupport.AlreadyRunning as err:
        pid_obj.load_registered()
        # fixed typo in the log message: "aready" -> "already"
        logSupport.log.exception("Failed starting Frontend with action %s. Instance with pid %s is already running for action %s. Exception during pid registration: %s" % (action, pid_obj.mypid, str(pid_obj.action_type), err))
        raise

    try:
        try:
            if action == "run":
                spawn(sleep_time, advertize_rate, work_dir,
                      frontendDescript, groups, max_parallel_workers,
                      restart_interval, restart_attempts)
            elif action in ('removeWait', 'removeIdle', 'removeAll',
                            'removeWaitExcess', 'removeIdleExcess',
                            'removeAllExcess'):
                spawn_removal(work_dir, frontendDescript, groups,
                              max_parallel_workers, action)
            else:
                raise ValueError("Unknown action: %s" % action)
        except KeyboardInterrupt:
            logSupport.log.info("Received signal...exit")
        except HUPException:
            # SIGHUP: release the pid lock, then replace this process with
            # the reconfig tool, which restarts the frontend with the
            # (possibly updated) XML configuration
            logSupport.log.info("Received SIGHUP, reload config")
            pid_obj.relinquish()
            os.execv(os.path.join(FRONTEND_DIR, "../creation/reconfig_frontend"),
                     ['reconfig_frontend', '-sighupreload', '-xml',
                      '/etc/gwms-frontend/frontend.xml'])
        except Exception:
            # top-level boundary (narrowed from a bare "except:"):
            # log and fall through to cleanup in finally
            logSupport.log.exception("Exception occurred trying to spawn: ")
    finally:
        pid_obj.relinquish()