Example 1
def main():
    """Set up and run the nowcast system message broker.

    Set-up includes:

    * Building the command-line parser, and parsing the command-line used
      to launch the message broker
    * Reading and parsing the configuration file given on the command-line
    * Configuring the logging system as specified in the configuration file
    * Logging the message broker's PID and the file path/name that was used
      to configure it

    The set-up is repeated if the message broker process receives a HUP signal
    so that the configuration can be re-loaded without having to stop and
    re-start the message broker.

    After the set-up is complete, launch the broker message queuing process.

    See :command:`python -m nemo_nowcast.message_broker --help`
    for details of the command-line interface.
    """
    cli = CommandLineInterface(NAME, package="nemo_nowcast", description=__doc__)
    cli.build_parser()
    parsed_args = cli.parser.parse_args()
    config = Config()
    config.load(parsed_args.config_file)
    msg = _configure_logging(config)
    logger.info(f"running in process {os.getpid()}")
    logger.info(f"read config from {config.file}")
    logger.info(msg)
    run(config)
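
The HUP-triggered re-load described in the docstring happens outside this snippet; a minimal sketch of how such a handler could be wired up (the handler and the setup callable are illustrative, not the package's actual API):

import logging
import signal

logger = logging.getLogger(__name__)

def install_hup_handler(setup):
    """Sketch: re-run the set-up steps when the process receives SIGHUP."""
    def handler(signum, frame):
        logger.info("received SIGHUP, re-loading configuration")
        setup()  # repeat the parser/config/logging set-up described above
    signal.signal(signal.SIGHUP, handler)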
Example 2
	def apply_config(self, new_config):
		# check whether the new config differs from the current one
		if new_config != config.getDict():
			# disable while loading config
			self.active = False

			# TODO: deactivate queues
			logging.info("Cleaning up actions and sensors")
			self.cleanup_sensors()
			self.cleanup_actions()

			# TODO: check valid config file?!
			# write config to file
			try:
				with open('%s/worker/config.json' % PROJECT_PATH, 'w') as f:
					f.write(json.dumps(new_config))
			except Exception:
				# logging.exception already appends the traceback
				logging.exception("Wasn't able to write config file")

			# set new config
			config.load(PROJECT_PATH + "/worker/config.json")

			if config.get('active'):
				logging.info("Activating actions and sensors")
				self.setup_sensors()
				self.setup_actions()
				# TODO: activate queues
				self.active = True

			logging.info("Config saved successfully")
		else:
			logging.info("Config didn't change")
Example 3
def blu(rip: bool,
        config_location: str,
        log_config: str,
        re_run: bool = False):
    setup_logging(default_path=log_config)
    config: Config = Config()

    if config_location:
        config.load(config_location)
    else:
        config.load('./blu.yml')

    if rip:

        if re_run:
            makeMKV: MakeMKV = MakeMKV(MockCommander())
            compressor: HandbrakeCLI = HandbrakeCLI(MockCommander())
        else:
            makeMKV: MakeMKV = MakeMKV(CommanderImpl())
            compressor: HandbrakeCLI = HandbrakeCLI(CommanderImpl())

        drives: List[Drive] = makeMKV.scan_drives()
        identifier: Identifier = Identifier()

        for drive in drives:
            drive = makeMKV.scan_disc(drive)
            identifier.identify(drive.disc)

            if drive.disc.is_series():
                ripSeries(compressor, config, drive, makeMKV, re_run)
            else:
                ripMovie(compressor, config, drive, makeMKV, re_run)
Example 4
    def load_config(self, parser):
        options, args = parser.parse_args(self._args)
        self.config_path = options.config

        def load_file(filename):
            d = {'__file__': filename}
            with self._open(filename, 'r') as f:
                exec(f.read(), d, d)
            return d

        if options.config:
            parser.set_defaults(**load_file(options.config))
        elif len(args) == 0 or args[0] in ['start', 'restart', 'run']:
            parser.error("a configuration file is required to start")

        for ex in options.extensions:
            try:
                parser.set_defaults(**load_file(ex))
            except IOError:
                # couldn't open the file; try to interpret it as Python source
                d = {}
                exec(ex, d, d)
                parser.set_defaults(**d)

        # Now we parse args again with the config file settings as defaults
        options, args = parser.parse_args(self._args)
        config.load(options.__dict__)
        return args
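
For reference, load_file above executes the configuration file as Python and collects its top-level names into a dict of defaults; a file it could consume might look like this (contents invented for illustration):

# sample config, plain Python executed by load_file(); names are illustrative
host = '127.0.0.1'
port = 8080
workers = 4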
Example 5
    def load_config(self, parser):
        options = parser.parse_args(self._args)
        self.config_path = options.config

        def load_file(filename):
            d = {'__file__': filename}
            with self._open(filename, 'r') as f:
                exec(f.read(), d, d)
            return d

        parser.set_defaults(**load_file(options.config))

        for ex in options.extensions:
            try:
                parser.set_defaults(**load_file(ex))
            except IOError:
                # couldn't open the file; try to interpret it as Python source
                d = {}
                exec(ex, d, d)
                parser.set_defaults(**d)

        # Now we parse args again with the config file settings as defaults
        options = parser.parse_args(self._args)
        config.load(options.__dict__)
        self.action = options.action

        self.normalize_config()
Example 6
def run_server(config_file=""):
    if config_file != "":
        config_dir = os.path.dirname(config_file)
        if (not os.path.exists(config_dir)) or (not os.path.isdir(config_dir)):
            print "Error in the config file."
    else:
        config_dir = os.getcwd() + "/configs"
        config_file = config_dir + "/genesis2.conf"

    make_log(config_dir)
    logger = logging.getLogger("genesis2")
    logger.info("Genesis %s" % version())
    if os.path.isfile(config_file):
        logger.info("Using config file %s" % config_file)
    else:
        # Shutdown
        logger.critical("The %s is not a file." % config_file)
        exit(-1)

    # Read config
    config = Config()
    if os.path.exists(config_file) and os.path.isfile(config_file):
        config.load(config_file)
    else:
        logger.critical("The %s doesn't exist" % config_file)
        exit(-1)

    # (kudrom) TODO: I should delete the GenesisManager and substitute it with a Plugin
    GenesisManager(config)

    platform = detect_platform()
    logger.info("Detected platform: %s" % platform)

    # Load plugins
    import genesis2.plugins

    # Load apps
    path_apps = config.get("genesis2", "path_apps", None)
    if path_apps is None:
        path_apps = os.getcwd() + "/apps"
    logger.info("Using %s as path apps." % path_apps)
    appmgr = AppManager(path_apps=path_apps)
    appmgr.load_apps()

    # (kudrom) TODO: Register a new ComponentMgr

    # (kudrom) TODO: we should use an iptables plugin
    # Make sure correct kernel modules are enabled
    # genesis2.utils.shell('modprobe ip_tables')

    if not hasattr(genesis2.apis, "PGenesis2Server"):
        logger.error("There's no plugin for PGenesis2Server registered in the system")
        exit(-1)

    # The server is a plugin to ease its replacement
    logger.info("Starting server")
    server = getattr(genesis2.apis, "PGenesis2Server")
    server.initialize(config)
    server.serve_forever()
Example 7
	def __init__(self):
		try:  # TODO: this should be nicer...
			logging.config.fileConfig(os.path.join(PROJECT_PATH, 'logging.conf'), defaults={'logfilename': 'manager.log'})
		except Exception:
			print("Error while trying to load config file for logging")

		logging.info("Initializing manager")

		try:
			config.load(PROJECT_PATH +"/manager/config.json")
		except ValueError: # Config file can't be loaded, e.g. no valid JSON
			logging.exception("Wasn't able to load config file, exiting...")
			quit()

		try:
			db.connect(PROJECT_PATH)
			db.setup()
		except Exception:
			logging.exception("Couldn't connect to database!")
			quit()
		
		self.notifiers = []
		self.received_data_counter = 0
		self.alarm_dir = "/var/tmp/secpi/alarms"
		self.current_alarm_dir = "/var/tmp/secpi/alarms"
		
		try:
			self.data_timeout = int(config.get("data_timeout"))
		except Exception: # if not specified in the config file we set a default value
			self.data_timeout = 180
			logging.debug("Couldn't find or use config parameter for data timeout in manager config file. Setting default value: %d" % self.data_timeout)
		
		try:
			self.holddown_timer = int(config.get("holddown_timer"))
		except Exception: # if not specified in the config file we set a default value
			self.holddown_timer = 210
			logging.debug("Couldn't find or use config parameter for holddown timer in manager config file. Setting default value: %d" % self.holddown_timer)

		self.holddown_state = False
		self.num_of_workers = 0

		self.connect()

		# debug output, setups & state
		setups = db.session.query(db.objects.Setup).all()
		rebooted = False
		for setup in setups:
			logging.debug("name: %s active:%s" % (setup.name, setup.active_state))
			if setup.active_state:
				rebooted = True

		if rebooted:
			self.setup_notifiers()
			self.num_of_workers = db.session.query(db.objects.Worker).join((db.objects.Action, db.objects.Worker.actions)).filter(db.objects.Worker.active_state == True).filter(db.objects.Action.active_state == True).count()

		logging.info("Setup done!")
Example 8
	def __init__(self):
		try:  # TODO: this should be nicer...
			logging.config.fileConfig(os.path.join(PROJECT_PATH, 'logging.conf'), defaults={'logfilename': 'manager.log'})
		except Exception:
			print("Error while trying to load config file for logging")

		logging.info("Initializing manager")

		try:
			config.load(PROJECT_PATH +"/manager/config.json")
		except ValueError: # Config file can't be loaded, e.g. no valid JSON
			logging.exception("Wasn't able to load config file, exiting...")
			quit()

		try:
			db.connect(PROJECT_PATH)
			db.setup()
		except Exception:
			logging.exception("Couldn't connect to database!")
			quit()
		
		self.notifiers = []
		self.received_data_counter = 0
		self.alarm_dir = "/var/tmp/secpi/alarms"
		self.current_alarm_dir = "/var/tmp/secpi/alarms"
		try:
			self.data_timeout = int(config.get("data_timeout"))
			self.holddown_timer = int(config.get("holddown_timer"))
		except Exception: # if not specified in the config file we set default values for timeouts
			logging.debug("Couldn't find config parameters for timeouts in config file, using default values for timeouts")
			self.data_timeout = 10
			self.holddown_timer = 30
		self.holddown_state = False
		self.num_of_workers = 0


		self.connect()

		# debug output, setups & state
		setups = db.session.query(db.objects.Setup).all()
		rebooted = False
		for setup in setups:
			logging.debug("name: %s active:%s" % (setup.name, setup.active_state))
			if setup.active_state:
				rebooted = True

		if rebooted:
			self.setup_notifiers()
			self.num_of_workers = db.session.execute(text("select count(distinct w.id) as cnt from workers w join sensors s on w.id = s.worker_id join zones z on z.id = s.zone_id join zones_setups sz on sz.zone_id = z.id join setups se on se.id = sz.setup_id where se.active_state = 1 AND w.active_state = 1")).first()[0]

		logging.info("Setup done!")
Example 9
File: app.py Project: BenWu/poucave
def main(argv):
    logging.config.dictConfig(config.LOGGING)
    conf = config.load(config.CONFIG_FILE)

    checks = Checks.from_conf(conf)

    # If CLI arg is provided, run the check.
    if len(argv) >= 1 and argv[0] == "check":
        project = None
        name = None
        if len(argv) > 1:
            project = argv[1]
        if len(argv) > 2:
            name = argv[2]
        try:
            selected = checks.lookup(project=project, name=name)
        except ValueError as e:
            cprint(f"{e} in '{config.CONFIG_FILE}'", "red")
            return 2

        successes = []
        for check in selected:
            success = run_check(check)
            successes.append(success)

        return 0 if all(successes) else 1

    # Otherwise, run the Web app.
    app = init_app(checks)
    web.run_app(app, host=config.HOST, port=config.PORT, print=False)
Example 10
    def load_config(self) -> dict:
        """Load and parse the config file.

        :return: dict
            Loaded and parsed config
        """
        return config.load(self.config_path)
Example 11
def main():
    logging.config.fileConfig('logging.conf')
    parser = argparse.ArgumentParser(description='OSM Changeset diff filter')
    parser.add_argument('-l', dest='log_level', default='INFO',
                        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                        help='Set the log level')
    parser.add_argument('--configdir', dest='configdir', default='.',
                        help='Set path to config file')
    parser.add_argument('--db', dest='db_url', default='mongodb://localhost:27017/',
                        help='Set url for database')
    subparsers = parser.add_subparsers()

    parser_diff_fetch = subparsers.add_parser('diff-fetch')
    parser_diff_fetch.set_defaults(func=diff_fetch)
    parser_diff_fetch.add_argument('--initptr', action='store_true', default=False,
                                   help='Reset OSM minutely diff pointer')
    parser_diff_fetch.add_argument('-H', dest='history', help='Define how much history to fetch')
    parser_diff_fetch.add_argument('--track', action='store_true', default=False,
                                   help='Fetch current and future minutely diffs')
    parser_diff_fetch.add_argument('--simulate', type=int, default=None, help='Simulate changeset observation')

    parser_csets_filter = subparsers.add_parser('csets-filter')
    parser_csets_filter.set_defaults(func=csets_filter)

    parser_csets_analyze = subparsers.add_parser('csets-analyze')
    parser_csets_analyze.set_defaults(func=csets_analyze)

    parser_run_backends = subparsers.add_parser('run-backends')
    parser_run_backends.set_defaults(func=run_backends)
    parser_run_backends.add_argument('--track', action='store_true', default=False,
                                     help='Track changes and re-run backends periodically')

    parser_worker = subparsers.add_parser('worker')
    parser_worker.set_defaults(func=worker)
    parser_worker.add_argument('--track', action='store_true', default=False,
                               help='Track changes and re-run worker tasks periodically')

    args = parser.parse_args()
    logging.getLogger('').setLevel(getattr(logging, args.log_level))

    config = configfile.Config()
    config.load(path=args.configdir)

    db = database.DataBase(url=args.db_url)
    logger.debug('Connected to db: {}'.format(db))

    return args.func(args, config, db)
Example 12
def main(argv):
    '''
    Run the vmcache command line utility.
    '''
    try:
        args = _parse(argv[1:])
        if args.debug:
            logging.basicConfig(level=logging.DEBUG, stream=sys.stderr)
        else:
            logging.basicConfig(level=logging.WARNING, stream=sys.stderr)
        config = Config(args.configfile)
        config.load()
        _start_log(config, args)
        args.func(config, args)
    except KeyboardInterrupt:
        sys.stderr.write('interrupted\n')
        return 1
Example 13
def main():
    args = _parse_args()
    _setup_logging(args)
    cfg = config.load(args.config)
    db_path = cfg['anime'].getpath('database')
    migrations.migrate(str(db_path))
    cmd = AnimeCmd(cfg)
    print(_INTRO)
    cmd.cmdloop()
Example 14
def initlog(path=None):
    """ Set up logging """
    logconf = config.load('logging.yaml', path)
    logging.config.dictConfig(logconf)
    log.critical("LOGLEVEL ENABLED: CRITICAL")
    log.error("LOGLEVEL ENABLED: ERROR")
    log.warn("LOGLEVEL ENABLED: WARN")
    log.info("LOGLEVEL ENABLED: INFO")
    log.debug("LOGLEVEL ENABLED: DEBUG")
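
The logconf dict handed to logging.config.dictConfig above must follow the stdlib dictConfig schema; a minimal example of the expected shape (handler and level choices are illustrative):

logconf = {
    'version': 1,
    'handlers': {
        'console': {'class': 'logging.StreamHandler'},
    },
    'root': {'level': 'INFO', 'handlers': ['console']},
}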
Example 15
    def __init__(self):
        self.actions = []
        self.sensors = []
        self.active = False
        self.data_directory = "/var/tmp/secpi/worker_data"
        self.zip_directory = "/var/tmp/secpi"
        self.message_queue = []  # stores messages which couldn't be sent

        try:
            logging.config.fileConfig(os.path.join(PROJECT_PATH,
                                                   'logging.conf'),
                                      defaults={'logfilename': 'worker.log'})
        except Exception:
            print("Error while trying to load config file for logging")

        logging.info("Initializing worker")

        try:
            config.load(PROJECT_PATH + "/worker/config.json")
            logging.debug("Config loaded")
        except ValueError:  # Config file can't be loaded, e.g. no valid JSON
            logging.error("Wasn't able to load config file, exiting...")
            quit()

        time.sleep(60)  # TEMPORARY FIX for #83

        self.prepare_data_directory(self.data_directory)
        self.connect()

        # if we don't have a pi id we need to request the initial config, afterwards we have to reconnect
        # to the queues which are specific to the pi id -> hence, call connect again
        if not config.get('pi_id'):
            logging.debug(
                "No Pi ID found, will request initial configuration...")
            self.fetch_init_config()
        else:
            logging.info("Setting up sensors and actions")
            self.active = config.get('active')
            self.setup_sensors()
            self.setup_actions()
            logging.info("Setup done!")
Example 16
def arg_parser():
    daemon = Daemon('watch_process.pid', main)
    parser = argparse.ArgumentParser(
        description='Stream download and upload, not only for bilibili.')
    parser.add_argument('--version',
                        action='version',
                        version=f"v{__version__}")
    parser.add_argument('-v',
                        '--verbose',
                        action="store_const",
                        const=logging.DEBUG,
                        help="Increase output verbosity")
    parser.add_argument(
        '--config',
        type=argparse.FileType(encoding='UTF-8'),
        help='Location of the configuration file (default "./config.yaml")')
    subparsers = parser.add_subparsers(
        help='Windows does not support this sub-command.')
    # create the parser for the "start" command
    parser_start = subparsers.add_parser('start',
                                         help='Run as a daemon process.')
    parser_start.set_defaults(func=daemon.start)
    parser_stop = subparsers.add_parser(
        'stop', help='Stop daemon according to "watch_process.pid".')
    parser_stop.set_defaults(func=daemon.stop)
    parser_restart = subparsers.add_parser('restart')
    parser_restart.set_defaults(func=daemon.restart)
    parser.set_defaults(func=lambda: asyncio.run(main()))
    args = parser.parse_args()
    config.load(args.config)
    LOG_CONF.update(config.get('LOGGING', {}))
    if args.verbose:
        LOG_CONF['loggers']['biliup']['level'] = args.verbose
        LOG_CONF['root']['level'] = args.verbose
    logging.config.dictConfig(LOG_CONF)
    if platform.system() == 'Windows':
        return asyncio.run(main())
    args.func()
Example 17
def load_config(conf_dir):
    pattern = os.path.join(conf_dir, "conf.d", "*.conf")
    files = glob.glob(pattern)

    # Trying to use invalid configuration directory is a user error or
    # broken installation. Failing fast will help to fix the issue.
    # https://github.com/oVirt/ovirt-imageio/issues/33
    if not files:
        raise ValueError(f"Could not find {pattern}")

    # Vendor may override application defaults if needed.
    pattern = os.path.join(VENDOR_CONF_DIR, "conf.d", "*.conf")
    files.extend(glob.glob(pattern))

    # Override files based on file name sort order:
    # - /var/lib/ovirt-imageio/conf.d/75-vendor.conf overrides
    #   /etc/ovirt-imageio/conf.d/50-vdsm.conf.
    # - /etc/ovirt-imageio/conf.d/99-user.conf overrides
    #   /var/lib/ovirt-imageio/conf.d/75-vendor.conf.
    files.sort(key=os.path.basename)

    return config.load(files)
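
To make the override order from the comments concrete, this small illustration shows how the basename sort interleaves the two directories (paths taken from the comments above):

import os

files = [
    '/etc/ovirt-imageio/conf.d/50-vdsm.conf',
    '/etc/ovirt-imageio/conf.d/99-user.conf',
    '/var/lib/ovirt-imageio/conf.d/75-vendor.conf',
]
files.sort(key=os.path.basename)
# -> 50-vdsm.conf, 75-vendor.conf, 99-user.conf;
#    later files in this order override earlier ones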
Example 18
def main():
    args = parse_args()
    configure_logger(args)
    try:
        log.info("Starting (pid=%s, version=%s)", os.getpid(), version.string)
        cfg = config.load([os.path.join(args.conf_dir, "daemon.conf")])

        server = Server(cfg)
        signal.signal(signal.SIGINT, server.terminate)
        signal.signal(signal.SIGTERM, server.terminate)

        server.start()
        try:
            systemd.daemon.notify("READY=1")
            log.info("Ready for requests")
            while server.running:
                signal.pause()
        finally:
            server.stop()
        log.info("Stopped")
    except Exception:
        log.exception("Server failed")
        sys.exit(1)
Example 19
File: main.py Project: SecPi/SecPi
from sites.setups import SetupsPage
from sites.alarms import AlarmsPage
from sites.workers import WorkersPage
from sites.actions import ActionsPage
from sites.notifiers import NotifiersPage
from sites.actionparams import ActionParamsPage
from sites.notifierparams import NotifierParamsPage
from sites.sensorparams import SensorParamsPage
from sites.logs import LogEntriesPage
from sites.setupszones import SetupsZonesPage
from sites.workersactions import WorkersActionsPage

from sites.alarmdata import AlarmDataPage


config.load(PROJECT_PATH +"/webinterface/config.json")

class Root(object):

	def __init__(self):
		cherrypy.log("Initializing Webserver")
		
		cherrypy.config.update({'request.error_response': self.handle_error})
		cherrypy.config.update({'error_page.404': self.error_404})
		cherrypy.config.update({'error_page.401': self.error_401})
		
		self.sensors = SensorsPage()
		self.zones = ZonesPage()
		self.setups = SetupsPage()
		self.alarms = AlarmsPage()
		self.workers = WorkersPage()
Example 20
def main(heartbeat_stop_callback=None):
  global config
  parser = OptionParser()
  parser.add_option("-v", "--verbose", dest="verbose", action="store_true", help="verbose log output", default=False)
  parser.add_option("-e", "--expected-hostname", dest="expected_hostname", action="store",
                    help="expected hostname of current host. If hostname differs, agent will fail", default=None)
  (options, args) = parser.parse_args()

  expected_hostname = options.expected_hostname

  logging_level = logging.DEBUG if options.verbose else logging.INFO

  setup_logging(logger, AmbariConfig.AmbariConfig.getLogFile(), logging_level)
  global is_logger_setup
  is_logger_setup = True
  setup_logging(alerts_logger, AmbariConfig.AmbariConfig.getAlertsLogFile(), logging_level)
  Logger.initialize_logger('resource_management', logging_level=logging_level)

  # use the host's locale for numeric formatting
  try:
    locale.setlocale(locale.LC_ALL, '')
  except locale.Error as ex:
    logger.warning("Cannot set locale for ambari-agent. Please check your systemwide locale settings. Failed due to: {0}.".format(str(ex)))

  default_cfg = {'agent': {'prefix': '/home/ambari'}}
  config.load(default_cfg)

  if (len(sys.argv) > 1) and sys.argv[1] == 'stop':
    stop_agent()

  if (len(sys.argv) > 2) and sys.argv[1] == 'reset':
    reset_agent(sys.argv)

  # Check for ambari configuration file.
  resolve_ambari_config()
  
  # Add syslog handler based on ambari config file
  add_syslog_handler(logger)

  # Starting data cleanup daemon
  data_cleaner = None
  if config.has_option('agent', 'data_cleanup_interval') and int(config.get('agent','data_cleanup_interval')) > 0:
    data_cleaner = DataCleaner(config)
    data_cleaner.start()

  perform_prestart_checks(expected_hostname)

  # Starting ping port listener
  try:
    #This acts as a single process machine-wide lock (albeit incomplete, since
    # we still need an extra file to track the Agent PID)
    ping_port_listener = PingPortListener(config)
  except Exception as ex:
    err_message = "Failed to start ping port listener of: " + str(ex)
    logger.error(err_message)
    sys.stderr.write(err_message)
    sys.exit(1)
  ping_port_listener.start()

  update_log_level(config)

  if not OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
    daemonize()

  #
  # Iterate through the list of server hostnames and connect to the first active server
  #

  active_server = None
  server_hostnames = hostname.server_hostnames(config)

  connected = False
  stopped = False

  # Keep trying to connect to a server or bail out if ambari-agent was stopped
  while not connected and not stopped:
    for server_hostname in server_hostnames:
      try:
        server_ip = socket.gethostbyname(server_hostname)
        server_url = config.get_api_url(server_hostname)
        logger.info('Connecting to Ambari server at %s (%s)', server_url, server_ip)
      except socket.error:
        logger.warn("Unable to determine the IP address of the Ambari server '%s'", server_hostname)

      # Wait until MAX_RETRIES to see if server is reachable
      netutil = NetUtil(config, heartbeat_stop_callback)
      (retries, connected, stopped) = netutil.try_to_connect(server_url, MAX_RETRIES, logger)

      # if connected, launch controller
      if connected:
        logger.info('Connected to Ambari server %s', server_hostname)
        # Set the active server
        active_server = server_hostname
        # Launch Controller communication
        controller = Controller(config, server_hostname, heartbeat_stop_callback)
        controller.start()
        while controller.is_alive():
          time.sleep(0.1)

      #
      # If Ambari Agent connected to the server or
      # Ambari Agent was stopped using stop event
      # Clean up if not Windows OS
      #
      if connected or stopped:
        ExitHelper().exit(0)
        logger.info("finished")
        break
    pass # for server_hostname in server_hostnames
  pass # while not (connected or stopped)

  return active_server
Example 21
def main():
    logging.config.fileConfig('logging.conf')
    parser = argparse.ArgumentParser(description='OSM Changeset diff filter')
    parser.add_argument(
        '-l',
        dest='log_level',
        default='INFO',
        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
        help='Set the log level')
    parser.add_argument('--configdir',
                        dest='configdir',
                        default='.',
                        help='Set path to config file')
    parser.add_argument('--db',
                        dest='db_url',
                        default='mongodb://localhost:27017/',
                        help='Set url for database')
    parser.add_argument('--metrics',
                        dest='metrics',
                        action='store_true',
                        default=False,
                        help='Enable metrics through Prometheus client API')
    parser.add_argument('--metricsport',
                        dest='metricsport',
                        type=int,
                        default=8000,
                        help='Port through which to serve metrics')
    subparsers = parser.add_subparsers()

    parser_diff_fetch = subparsers.add_parser('diff-fetch')
    parser_diff_fetch.set_defaults(func=diff_fetch)
    parser_diff_fetch.add_argument('--initptr',
                                   action='store_true',
                                   default=False,
                                   help='Reset OSM minutely diff pointer')
    parser_diff_fetch.add_argument('-H',
                                   dest='history',
                                   help='Define how much history to fetch')
    parser_diff_fetch.add_argument(
        '--track',
        action='store_true',
        default=False,
        help='Fetch current and future minutely diffs')
    parser_diff_fetch.add_argument('--simulate',
                                   type=int,
                                   default=None,
                                   help='Simulate changeset observation')

    parser_csets_filter = subparsers.add_parser('csets-filter')
    parser_csets_filter.set_defaults(func=csets_filter)

    parser_csets_analyze = subparsers.add_parser('csets-analyze')
    parser_csets_analyze.set_defaults(func=csets_analyze)

    parser_run_backends = subparsers.add_parser('run-backends')
    parser_run_backends.set_defaults(func=run_backends)
    parser_run_backends.add_argument(
        '--track',
        action='store_true',
        default=False,
        help='Track changes and re-run backends periodically')

    parser_worker = subparsers.add_parser('worker')
    parser_worker.set_defaults(func=worker)
    parser_worker.add_argument(
        '--track',
        action='store_true',
        default=False,
        help='Track changes and re-run worker tasks periodically')

    parser_supervisor = subparsers.add_parser('supervisor')
    parser_supervisor.set_defaults(func=supervisor)
    parser_supervisor.add_argument(
        '--track',
        action='store_true',
        default=False,
        help='Track changes and re-run supervisor tasks periodically')

    args = parser.parse_args()
    logging.getLogger('').setLevel(getattr(logging, args.log_level))

    config = configfile.Config()
    config.load(path=args.configdir)

    if args.metrics:
        prometheus_client.start_http_server(args.metricsport)

    if args.func == diff_fetch:
        dbadm = True
    else:
        dbadm = False
    db = database.DataBase(url=args.db_url, admin=dbadm)
    logger.debug('Connected to db: {} (RW={})'.format(db, dbadm))

    return args.func(args, config, db)
Example 22
def load_config(args):
    files = find_configs([VENDOR_CONF_DIR, args.conf_dir])
    return config.load(files)
Example 23
def main(heartbeat_stop_callback=None):
  global config
  parser = OptionParser()
  parser.add_option("-v", "--verbose", dest="verbose", action="store_true", help="verbose log output", default=False)
  parser.add_option("-e", "--expected-hostname", dest="expected_hostname", action="store",
                    help="expected hostname of current host. If hostname differs, agent will fail", default=None)
  (options, args) = parser.parse_args()

  expected_hostname = options.expected_hostname

  setup_logging(logger, AmbariConfig.AmbariConfig.getLogFile(), options.verbose)
  global is_logger_setup
  is_logger_setup = True
  setup_logging(alerts_logger, AmbariConfig.AmbariConfig.getAlertsLogFile(), options.verbose)

  default_cfg = {'agent': {'prefix': '/home/ambari'}}
  config.load(default_cfg)

  if (len(sys.argv) > 1) and sys.argv[1] == 'stop':
    stop_agent()

  if (len(sys.argv) > 2) and sys.argv[1] == 'reset':
    reset_agent(sys.argv)

  # Check for ambari configuration file.
  resolve_ambari_config()
  
  # Add syslog handler based on ambari config file
  add_syslog_handler(logger)

  # Starting data cleanup daemon
  data_cleaner = None
  if config.has_option('agent', 'data_cleanup_interval') and int(config.get('agent','data_cleanup_interval')) > 0:
    data_cleaner = DataCleaner(config)
    data_cleaner.start()

  perform_prestart_checks(expected_hostname)

  # Starting ping port listener
  try:
    #This acts as a single process machine-wide lock (albeit incomplete, since
    # we still need an extra file to track the Agent PID)
    ping_port_listener = PingPortListener(config)
  except Exception as ex:
    err_message = "Failed to start ping port listener of: " + str(ex)
    logger.error(err_message)
    sys.stderr.write(err_message)
    sys.exit(1)
  ping_port_listener.start()

  update_log_level(config)

  server_hostname = hostname.server_hostname(config)
  server_url = config.get_api_url()

  if not OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
    daemonize()

  try:
    server_ip = socket.gethostbyname(server_hostname)
    logger.info('Connecting to Ambari server at %s (%s)', server_url, server_ip)
  except socket.error:
    logger.warn("Unable to determine the IP address of the Ambari server '%s'", server_hostname)

  # Wait until server is reachable
  netutil = NetUtil(heartbeat_stop_callback)
  retries, connected = netutil.try_to_connect(server_url, -1, logger)
  # Ambari Agent was stopped using stop event
  if connected:
    # Launch Controller communication
    controller = Controller(config, heartbeat_stop_callback)
    controller.start()
    controller.join()
  if not OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
    ExitHelper.execute_cleanup()
    stop_agent()
  logger.info("finished")
Example 24
def create_app(config_filename):
    app = flask.Flask(__name__)
    app.config.update(config.load(config_filename))
    app.register_blueprint(handlers.root)
    return app
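
A typical way to drive such an application factory (the config file name is assumed):

app = create_app('config.yaml')
app.run(host='127.0.0.1', port=8080)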
Example 25
def test_cbs(run_config_file_name: str, out_file_name: str = None, random_seed=None, permutations=None):
    rc = config.load(run_config_file_name)

    if permutations is not None:
        rc.permutations = permutations

    random.seed(random_seed)

    if out_file_name:
        base_out_path = pathlib.Path(out_file_name)
    else:
        base_out_path = pathlib.Path(rc.map_file_name).parent.joinpath(
            f'routes-{time_utils.get_current_time_stamp()}.csv')

    g = grid2d.Grid2D.from_file(pathlib.Path(rc.map_file_name))

    if not rc.start:
        rc.start = g.get_random_free_cell()

    if not rc.end:
        rc.end = g.get_random_free_cell({rc.start})

    agent_count = len(rc.agents)

    agents_have_start = False
    agents_have_end = False
    for a in rc.agents:
        if a.get('start_cell'):
            agents_have_start = True
        if a.get('goal_cell'):
            agents_have_end = True

    if not agents_have_start:
        start_cells = [rc.start] + g.find_free_cells_around(rc.start, agent_count - 1)
    else:
        start_cells = [a['start_cell'] for a in rc.agents]

    if not agents_have_end:
        end_cells = [rc.end] + g.find_free_cells_around(rc.end, agent_count - 1, set(start_cells))
    else:
        end_cells = [a['goal_cell'] for a in rc.agents]

    for a, sc, gc in zip(rc.agents, start_cells, end_cells):
        a['start_cell'] = sc
        a['goal_cell'] = gc

    LOG.info(f'STARTING mapf test, run_config: {rc}, base_out_path: {base_out_path}')

    for permutation_idx in range(rc.permutations):
        LOG.info(f'STARTED permutation {permutation_idx:03d}/{rc.permutations:03d}')
        # random.shuffle(rc.agents)
        agents = [agent.Agent(**a) for a in rc.agents]
        cbs_finder = cbs.CbsMafpFinder(g)
        agents_repo, total_cost = cbs_finder.find_path(agent_repository.AgentRepository(agents), astar.Searcher,
                                                       lambda agnt: dict(
                                                           h_func=mapf_heuristics.get_good_manhatten_like_heuristic(
                                                               agnt)))

        for a in agents_repo.agents:
            LOG.debug(
                f"[{permutation_idx:03d}/{rc.permutations:3d}]:: Agent: {a.id}, path len: {len(a.path)} path cost: "
                f"{a.path_cost}, expanded nodes: {a.expanded_nodes}")
            print(g.to_str(a.cells_path(), a.start_cell, a.goal_cell, path_chr=str(a.id)[0]))

        out_path = base_out_path.parent / (base_out_path.stem + f'-{permutation_idx:03d}' + base_out_path.suffix)

        cbs_finder.save_paths(agents_repo, out_path)
        LOG.info(f'FINISHED permutation {permutation_idx:03d}/{rc.permutations:03d} => {out_path}')
        cbs_finder.validate_paths(g, agents_repo)
Example 26
def e2e_parallel(run_config_file_name: str, out_dir: str = None, random_seed=None, permutations=None,
                 map_file_name: str = None, max_adv_agent_ds: int = None):
    logging.getLogger('vgmapf.problems.mapf.multi_agent_pathfinding').setLevel(logging.INFO)

    cores_count = multiprocessing.cpu_count()

    LOG.info(f'Detected {cores_count} cores!')

    print(run_config_file_name)
    #rc1 = config.load('config-simple.yml')
    rc1 = config.load(run_config_file_name)
    if permutations is not None:
        rc1.permutations = permutations
    if map_file_name:
        rc1.map_file_name = map_file_name
    random.seed(random_seed)

    max_adv_agent_ds = rc1.robust_route

    if out_dir is None:
        out_dir = pathlib.Path(__file__).parent / 'outputs' / time_utils.get_current_time_stamp()
    else:
        out_dir = pathlib.Path(out_dir)

    out_dir.mkdir(parents=True, exist_ok=True)
    out_dir1 = out_dir

    grid = grid2d.Grid2D.from_file(pathlib.Path(rc1.map_file_name))
    _update_start_and_goal_cells(rc1, grid)

    print(grid.to_str(start=rc1.start, end=rc1.end))

    start_time = timeit.default_timer()

    max_agents = len(rc1.agents)
    for swarm_amount in range(2, max_agents + 1):
        rc = rc1
        # Make folders for number of agents
        swarm_out_dir = out_dir1 / str(len(rc.agents))
        swarm_out_dir.mkdir(parents=True, exist_ok=True)
        out_dir = swarm_out_dir


        # Stage 1 - build normal paths
        LOG.info('START 01 - building normal paths')

        normal_paths_dir = out_dir / STAGE_01_NORMAL_PATHS

        normal_paths_dir.mkdir(parents=True, exist_ok=True)

        tasks = [(normal_paths_dir, grid, rc, permutation_idx) for permutation_idx in range(rc.permutations)]

        with multiprocessing.Pool(processes=cores_count) as pool:
            pool.starmap(_stage_1_normal_paths, tasks)

        LOG.info('FINISH 01 - building normal paths')

        LOG.info('STARTED 02 - run Robust Routes on normal paths')

        # Stage 2 - robust routes

        robust_paths_dir = out_dir / STAGE_02_ROBUST_PATHS
        robust_paths_dir.mkdir(parents=True, exist_ok=True)

        tasks = [(robust_paths_dir, grid, max_adv_agent_ds, p) for p in normal_paths_dir.iterdir()]

        with multiprocessing.Pool(processes=cores_count) as pool:
            pool.starmap(_stage_2_normal_robust, tasks)

        LOG.info('FINISHED 02 - run Robust Routes on normal paths')


        # #Stage 25 - run kamikaze on robust routes
        # LOG.info('STARTED 025 - run kamikaze on robust routes')
        #
        # kamikaze_on_robust_paths_dir = out_dir / STAGE_025_KAMIKAZE
        # kamikaze_on_robust_paths_dir.mkdir(parents=True, exist_ok=True)
        #
        # kamikaze_on_robust_results_summary = kamikaze_on_robust_paths_dir / '025-kamikaze_on_robust_paths.csv'
        #
        # # noinspection DuplicatedCode
        # tasks = [(kamikaze_on_robust_paths_dir, rc, p) for p in robust_paths_dir.iterdir()]
        # with multiprocessing.Pool(processes=cores_count) as pool:
        #     results = pool.starmap(_stage_25_kamikaze, tasks)
        #
        # if results:
        #     with kamikaze_on_robust_results_summary.open('w', newline='') as fresults:
        #         out_csv = csv.DictWriter(fresults, vars(results[0]).keys())
        #         out_csv.writeheader()
        #
        #         for row in results:
        #             try:
        #                 out_csv.writerow(vars(row))
        #             except Exception:
        #                 LOG.warning(f'Failed writing row: {row}', exc_info=True)
        #
        #         fresults.flush()
        #
        # LOG.info('FINISHED 025 - run kamikaze on robust routes')

        # Stage 3 - run MDR on normal paths

        LOG.info('STARTED 03 - run MDR on normal paths')

        mdr_on_normal_paths_dir = out_dir / STAGE_03_MDR_NORMAL_PATHS
        mdr_on_normal_paths_dir.mkdir(parents=True, exist_ok=True)

        mdr_on_normal_results_summary = mdr_on_normal_paths_dir / '03-mdr_on_normal_paths-results.csv'

        tasks = [
            (mdr_on_normal_paths_dir, adv_agent.id, adv_agent_ds, rc, p)
            for p in normal_paths_dir.iterdir()
            for adv_agent in paths_serializer.load(p).agents
            for adv_agent_ds in range(1, max_adv_agent_ds + 1)
        ]

        LOG.debug(f'stage_3 tasks:\n\t' + '\n\t'.join(str(x) for x in tasks))
        with multiprocessing.Pool(processes=cores_count) as pool:
            results = pool.starmap(_stage_3_normal_mdr, tasks)

        if results:
            with mdr_on_normal_results_summary.open('w', newline='') as fresults:
                out_csv = csv.DictWriter(fresults, vars(results[0]).keys())
                out_csv.writeheader()

                for row in results:
                    try:
                        out_csv.writerow(vars(row))
                    except Exception:
                        LOG.warning(f'Failed writing row: {row}', exc_info=True)

        LOG.info('FINISHED 03 - run MDR on normal paths')

        LOG.info('STARTED 04 - run MDR on robust paths')

        # Stage 4 - MDR on robust paths

        mdr_on_robust_paths_dir = out_dir / STAGE_04_MDR_ROBUST_PATHS
        mdr_on_robust_paths_dir.mkdir(parents=True, exist_ok=True)

        mdr_on_robust_results_summary = mdr_on_robust_paths_dir / '04-mdr_on_robust_paths-results.csv'

        tasks = [(mdr_on_robust_paths_dir, rc, p) for p in robust_paths_dir.iterdir()]
        with multiprocessing.Pool(processes=cores_count) as pool:
            results = pool.starmap(_stage_4_robust_mdr, tasks)

        if results:
            with mdr_on_robust_results_summary.open('w', newline='') as fresults:
                out_csv = csv.DictWriter(fresults, vars(results[0]).keys())
                out_csv.writeheader()

                for row in results:
                    try:
                        out_csv.writerow(vars(row))
                    except Exception:
                        LOG.warning(f'Failed writing row: {row}', exc_info=True)

                fresults.flush()

        end_time = timeit.default_timer()
        LOG.info(
            f'FINISHED 04 - run MDR on robust paths, elapsed:{end_time - start_time:2f} = {datetime.timedelta(seconds=end_time - start_time)}')
        del rc1.agents[-1]
Example 27
def mapf(run_config_file_name: str, out_file_name: str = None, random_seed=None, permutations: int = None,
         map_file_name: str = None):
    rc = config.load(run_config_file_name)

    if permutations is not None:
        rc.permutations = permutations

    if map_file_name:
        rc.map_file_name = map_file_name

    random.seed(random_seed)

    if out_file_name:
        base_out_path = pathlib.Path(out_file_name)
    else:
        timestamp = time_utils.get_current_time_stamp()
        base_out_path = pathlib.Path(__file__).parent.joinpath(
            'routes',
            timestamp,
            f'paths-{timestamp}.path'
        )
        base_out_path.parent.mkdir(parents=True, exist_ok=True)

    g = grid2d.Grid2D.from_file(pathlib.Path(rc.map_file_name))

    _update_start_and_goal_cells(rc, g)

    LOG.info(f'STARTING mapf test, run_config: {rc}, base_out_path: {base_out_path}')

    for permutation_idx in range(rc.permutations):
        with benchmark_utils.time_it(f'Building path #{permutation_idx}'):
            LOG.info(f'STARTED permutation {permutation_idx + 1:03d}/{rc.permutations:03d}')
            if permutation_idx > 0:
                random.shuffle(rc.agents)
            agents = [agent.Agent(**a) for a in rc.agents]
            mf = multi_agent_pathfinding.MapfFinder(g, agents)
            mf.find_paths(astar.Searcher,
                          lambda agnt: dict(h_func=mapf_heuristics.get_good_manhatten_like_heuristic(agnt)))

            for a in mf.agents:
                LOG.debug(
                    f"[{permutation_idx + 1:03d}/{rc.permutations:03d}]:: Agent: {a.id}, path len: {len(a.path)} "
                    f"path cost: {a.path_cost}, expanded nodes: {a.expanded_nodes}")

                print(g.to_str(a.cells_path(), a.start_cell, a.goal_cell, path_chr=str(a.id)[0]))

            out_path_base = base_out_path.parent / (
                    base_out_path.stem + f'-{permutation_idx:03d}' + base_out_path.suffix)
            mf.save_paths(out_path_base)
            LOG.info(f'FINISHED permutation {permutation_idx + 1:03d}/{rc.permutations:03d} => {out_path_base}')
            mf.validate_paths()

            robust_route = RobustPathMode(rc.robust_route)

            if robust_route == RobustPathMode.OFFLINE:
                makespan_original = mf.agents_repo.get_makespan()

                for agnt in agents:
                    if not agnt.is_adversarial:
                        agnt.path = None
                        agnt.path_cost = 0
                        agnt.expanded_nodes = 0

                mf_robust = multi_agent_pathfinding.MapfFinder(g, agents,
                                                               adv_agent_radiuses={a.id: a.damage_steps * 2 for a in
                                                                                   agents
                                                                                   if a.is_adversarial})
                mf_robust.find_paths(astar.Searcher,
                                     lambda agnt: dict(h_func=mapf_heuristics.get_good_manhatten_like_heuristic(agnt)))

                for a in mf_robust.agents:
                    LOG.debug(
                        f"[{permutation_idx + 1:03d}/{rc.permutations:03d}]:: Agent: {a.id}, path len: {len(a.path)} "
                        f"path cost: {a.path_cost}, expanded nodes: {a.expanded_nodes}")

                    print(g.to_str(a.cells_path(), a.start_cell, a.goal_cell, path_chr=str(a.id)[0]))

                out_path_robust = base_out_path.parent / (base_out_path.stem + f'-{permutation_idx:03d}-robust'
                                                          + base_out_path.suffix)
                mf_robust.save_paths(out_path_robust)
                LOG.info(f'FINISHED permutation {permutation_idx + 1:03d}/{rc.permutations:03d} => {out_path_robust}')
                mf_robust.validate_paths()
                makespan_robust = mf_robust.agents_repo.get_makespan()

                LOG.info(f'The difference in makespan is {makespan_robust - makespan_original}')

    return base_out_path.parent
Example 28
import att_iot_gateway.att_iot_gateway as IOT  # provides cloud support

def sigterm_handler(_signo, _stack_frame):
    # Raises SystemExit(0):
    sys.exit(0)


def on_connected():
    if _connectedEvent:                         # if we reconnect during runtime, there is no more connected event.
        _connectedEvent.set()

_connectedEvent = Event()

try:
    signal.signal(signal.SIGTERM, sigterm_handler)
    config.load()
    IOT.on_connected = on_connected  # so we can wait to start the modules until we are connected to the broker
    cloud.connect(modules.Actuate, processors.onAssetValueChanged)
    processors.load(config.processors)
    modules.load(config.modules)
    modules.syncGateway()
    logging.info("waiting for mqtt connection before starting all plugins")
    _connectedEvent.wait()
    _connectedEvent = None  # once connected we no longer need this event; it was only needed so plugins can send their initial values
    modules.run()
    # only load the web server if it is activated; not all plugins need it, and not all gateways want a web server running (e.g. fifthplay)
    if config.configs.has_option('webServer', 'enabled') and config.configs.get('webServer', 'enabled') == True:
        import pygate_core.webServer as webServer
        webServer.run()
    while 1:
        time.sleep(3)
except (KeyboardInterrupt, SystemExit):
Example 29
    def __init__(self, parent):
        super(MainPanel, self).__init__(parent)
        self.parent = parent
        self.games = config.get_games()
        self.game = None
        self.game_watcher = None
        while not self.games:
            error = wx.MessageDialog(parent=self,
                                     message="You have no games profiles set up. Replacing config with default.",
                                     caption="Info", style=wx.OK | wx.ICON_INFORMATION)
            error.ShowModal()
            error.Destroy()
            config.new()
            config.load()
            self.games = config.get_games()

        self.profile = wx.ComboBox(parent=self, choices=[game.name for game in self.games],
                                   style=wx.CB_READONLY)
        self.profile.SetSelection(0)

        self.track_list = ObjectListView(parent=self, style=wx.LC_REPORT | wx.BORDER_SUNKEN, sortable=True,
                                         useAlternateBackColors=False)
        self.track_list.SetEmptyListMsg("You currently do not have any sound files for this game.")
        self.track_list.SetColumns([
            ColumnDefn(title="#", fixedWidth=50, valueGetter="index", stringConverter="%i"),
            ColumnDefn(title="Title", width=250, valueGetter="name", minimumWidth=150, isSpaceFilling=True),
            ColumnDefn(title="Aliases", width=300, valueGetter="get_aliases", minimumWidth=200, isSpaceFilling=True),
            ColumnDefn(title="Bind", width=75, valueGetter="bind", minimumWidth=50, maximumWidth=120)
        ])
        self.track_list.rowFormatter = lambda x, y: x.SetTextColour(wx.RED) if y.get_aliases() == NO_ALIASES else None
        self.selected_track = None
        self.game_select(event=None)

        refresh_button = wx.Button(parent=self, label="Refresh tracks")
        self.start_stop_button = wx.Button(parent=self, label="Start")
        convert_button = wx.Button(parent=self, label="Audio converter")
        download_button = wx.Button(parent=self, label="Audio downloader")

        top_sizer = wx.BoxSizer(wx.VERTICAL)  # Root sizer
        profile_sizer = wx.BoxSizer(wx.VERTICAL)  # For the profile selection
        olv_sizer = wx.BoxSizer(wx.VERTICAL)  # For the ObjectListView
        button_sizer = wx.BoxSizer(wx.HORIZONTAL)  # Start/Stop and Refresh buttons

        profile_sizer.Add(self.profile, 0, wx.LEFT | wx.RIGHT | wx.EXPAND | wx.ALIGN_TOP, 5)
        olv_sizer.Add(self.track_list, 1, wx.LEFT | wx.RIGHT | wx.EXPAND | wx.ALIGN_TOP, 5)
        button_sizer.Add(self.start_stop_button, 0, wx.ALL | wx.ALIGN_LEFT, 5)
        button_sizer.Add(refresh_button, 0, wx.ALL | wx.ALIGN_LEFT, 5)
        button_sizer.Add(convert_button, 0, wx.ALL | wx.ALIGN_LEFT, 5)
        button_sizer.Add(download_button, 0, wx.ALL | wx.ALIGN_LEFT, 5)

        top_sizer.Add(profile_sizer, 0, wx.ALL | wx.EXPAND, 5)
        top_sizer.Add(olv_sizer, 1, wx.ALL | wx.EXPAND, 5)
        top_sizer.Add(button_sizer, 0, wx.ALL | wx.EXPAND, 5)
        top_sizer.SetSizeHints(self.parent)
        self.SetSizerAndFit(top_sizer)

        # Context menu
        self.context_menu = wx.Menu()
        set_aliases = self.context_menu.Append(wx.ID_ANY, "Set custom aliases")
        clear_aliases = self.context_menu.Append(wx.ID_ANY, "Clear custom aliases")
        set_bind = self.context_menu.Append(wx.ID_ANY, "Set bind")
        clear_bind = self.context_menu.Append(wx.ID_ANY, "Clear bind")
        clear_all = self.context_menu.Append(wx.ID_CLEAR, "Clear EVERYTHING (all tracks)")
        trim_file = self.context_menu.Append(wx.ID_CUT, "Trim audio file")

        self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, handler=self.list_right_click, source=self.track_list)
        self.Bind(wx.EVT_MENU, handler=self.set_aliases, source=set_aliases)
        self.Bind(wx.EVT_MENU, handler=self.clear_aliases, source=clear_aliases)
        self.Bind(wx.EVT_MENU, handler=self.set_bind, source=set_bind)
        self.Bind(wx.EVT_MENU, handler=self.clear_bind, source=clear_bind)
        self.Bind(wx.EVT_MENU, handler=self.clear_all, source=clear_all)
        self.Bind(wx.EVT_MENU, handler=self.trim_file, source=trim_file)

        self.Bind(wx.EVT_COMBOBOX, handler=self.game_select, source=self.profile)
        self.Bind(wx.EVT_BUTTON, handler=self.refresh, source=refresh_button)
        self.Bind(wx.EVT_BUTTON, handler=self.start_stop, source=self.start_stop_button)
        self.Bind(wx.EVT_BUTTON, handler=self.convert, source=convert_button)
        self.Bind(wx.EVT_BUTTON, handler=self.download, source=download_button)

        # self.Bind(wx.EVT_SIZE, handler=self.on_size)
        self.Bind(wx.EVT_CLOSE, handler=self.on_exit)
Esempio n. 35
0
import ast
import os

from utils.config_manager import ConfigManager

APP_NAME = "notifyme"
APP_CHARSET = 'UTF-8'

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Config file paths
CONFIGURATION_JSON_FILE = './config.json'
CONFIGURATION_JSON_FILE_DEV = './config_development.json'

config = ConfigManager(CONFIGURATION_JSON_FILE, CONFIGURATION_JSON_FILE_DEV)

LOG_ROOT_PATH = config.load('LOG_ROOT_PATH', 'logging', 'root_path')
RABBITMQ_SERVER = config.load('RABBITMQ_SERVER', 'bus', 'host')
RABBITMQ_USER = config.load('RABBITMQ_USER', 'bus', 'user')
RABBITMQ_PASSWORD = config.load('RABBITMQ_PASSWORD', 'bus', 'password')
RABBITMQ_QUEUE = config.load('RABBITMQ_QUEUE', 'bus', 'queue_name')
RABBITMQ_EXCHANGE_ERROR = config.load('RABBITMQ_EXCHANGE_ERROR', 'bus',
                                      'error_exchange')

SMTP_EMAIL = config.load('SMTP_EMAIL', 'smtp', 'email')
SMTP_HOST = config.load('SMTP_HOST', 'smtp', 'server')
SMTP_PORT = config.load('SMTP_PORT', 'smtp', 'port')
SMTP_FROM_NAME = config.load('SMTP_FROM_NAME', 'smtp', 'name')
SMTP_PASS = config.load('SMTP_PASS', 'smtp', 'password')
SEND_EMAILS = config.load('SMTP_SEND', 'smtp', 'send')  # 'send' is an assumed key; the source snippet re-read 'smtp', 'name' here
SMTP_TTLS = config.load('SMTP_TTLS', 'smtp', 'ttls')
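
The config.load('ENV_NAME', 'section', 'key') calls above imply a lookup that prefers an environment variable and then falls back to the development and main JSON files. A minimal sketch under that assumption (the real utils.config_manager may well differ):

import json
import os

class ConfigManager:
    def __init__(self, main_path, dev_path):
        self._configs = []
        for path in (dev_path, main_path):  # the dev file shadows the main file
            try:
                with open(path) as fp:
                    self._configs.append(json.load(fp))
            except FileNotFoundError:
                pass

    def load(self, env_name, *path):
        # an explicit environment variable overrides both files
        if env_name in os.environ:
            return os.environ[env_name]
        for cfg in self._configs:
            node = cfg
            try:
                for key in path:
                    node = node[key]
                return node
            except (KeyError, TypeError):
                continue
        return None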
Esempio n. 36
0
    def _init_config(self):
        cfg = config.load(self._path)
        if not self._cfg:
            self._cfg = cfg
            return
        self._override_cfg(self._cfg.getNode(), cfg.getNode())
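
_override_cfg itself is not shown; assuming the getNode() trees behave like nested dicts, a recursive merge in that spirit could look like:

def _override_cfg(self, dst, src):
    # scalars from the freshly loaded tree win; nested sections merge key by key
    for key, value in src.items():
        if isinstance(value, dict) and isinstance(dst.get(key), dict):
            self._override_cfg(dst[key], value)
        else:
            dst[key] = value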
Esempio n. 37
0
from sites.zones import ZonesPage
from sites.setups import SetupsPage
from sites.alarms import AlarmsPage
from sites.workers import WorkersPage
from sites.actions import ActionsPage
from sites.notifiers import NotifiersPage
from sites.actionparams import ActionParamsPage
from sites.notifierparams import NotifierParamsPage
from sites.sensorparams import SensorParamsPage
from sites.logs import LogEntriesPage
from sites.setupszones import SetupsZonesPage
from sites.workersactions import WorkersActionsPage

from sites.alarmdata import AlarmDataPage

config.load(PROJECT_PATH + "/webinterface/config.json")


class Root(object):
    def __init__(self):
        cherrypy.log("Initializing Webserver")

        cherrypy.config.update({'request.error_response': self.handle_error})
        cherrypy.config.update({'error_page.404': self.error_404})
        cherrypy.config.update({'error_page.401': self.error_401})

        self.sensors = SensorsPage()
        self.zones = ZonesPage()
        self.setups = SetupsPage()
        self.alarms = AlarmsPage()
        self.workers = WorkersPage()
Esempio n. 38
0
def main(*config_files,
         args=None,
         config_dirs=(),
         commands=(),
         config_dict=None):
    cwd = os.getcwd()
    if cwd not in sys.path:
        sys.path.insert(0, cwd)
    context.config.search_dirs.extend(config_dirs)

    if not commands:
        p = Path(sys.argv[0])
        if __package__ in (p.parent.name, p.name):
            commands += __name__,
        elif p.name.startswith('__'):
            commands += p.parent.name,
        else:
            commands += p.name,

    plugins = search_plugins()
    plugins.extend(search_plugins(*commands, force=True))
    for i in plugins:
        i.add_arguments(parser)

    if args is None:
        args, argv = parser.parse_known_args()
        cmds = list(commands)
        while argv and not argv[0].startswith('-'):
            cmds.append(argv.pop(0))
        if getattr(args, 'config', None):
            config_files += tuple(args.config)
        if getattr(args, 'config_stdin', None):
            assert not args.interact, '--config-stdin cannot be used' \
                                      ' with --interact'
            config_dict = utils.load_from_fd(sys.stdin.buffer)
        if args.logging:
            logging.basicConfig(level=args.logging.upper())
    else:
        cmds, argv = list(commands), []

    config = context.config
    plugins.extend(search_plugins(*cmds))
    for p in plugins:
        args, argv = p.parse_known_args(args=argv, namespace=args)
        config.load(*p.configs)
        config.update(p.get_config())
    cmds = [cmd for cmd in cmds if cmd not in sys.modules]

    config.load(*config_files)
    config_dict and config.update(config_dict)

    def sum_g(list_groups):
        if list_groups:
            return set(reduce(operator.add, list_groups))

    run = partial(loop_run,
                  group_resolver=GroupResolver(
                      include=sum_g(args.groups),
                      exclude=sum_g(args.exclude_groups),
                      all_groups=args.exclude_groups is not None,
                      default=True,
                  ),
                  cmds=cmds,
                  argv=argv,
                  ns=args,
                  prompt=PROMPT)

    try:
        if args.multiprocessing:
            with context.processes():
                print(PROMPT)
                logger = multiprocessing.get_logger()
                processes = list(process_iter(config.get('processes', {})))  # materialize: re-iterated on every supervision pass
                for p in processes:
                    logger.info('Create process %s', p['name'])
                    p['process'] = create_process(p)
                while True:
                    multiprocessing.connection.wait(
                        map(lambda x: x['process'].sentinel, processes), )
                    for p in processes:
                        proc = p['process']  # type: multiprocessing.Process
                        if not proc.is_alive():
                            logger.critical('Recreate process %s', p['name'])
                            p['process'] = create_process(p)
                    time.sleep(1)

        elif args.interact:
            from .core.interact import shell
            args.print = lambda *args: None
            shell(run)
        elif args.interact_kernel:
            from .core.interact import kernel
            kernel(run)
        else:
            run()
    except KeyboardInterrupt:
        pass
    finally:
        for p in multiprocessing.active_children():
            os.kill(p.pid, signal.SIGTERM)
        t = time.monotonic()
        sentinels = [p.sentinel for p in multiprocessing.active_children()]
        while sentinels and time.monotonic() - t < args.shutdown_timeout:
            multiprocessing.connection.wait(sentinels)
            sentinels = [p.sentinel for p in multiprocessing.active_children()]
        while multiprocessing.active_children():
            print('killall children')
            for p in multiprocessing.active_children():
                os.kill(p.pid, signal.SIGKILL)
            time.sleep(0.3)
Esempio n. 39
0
def main(heartbeat_stop_callback=None):
  global config
  parser = OptionParser()
  parser.add_option("-v", "--verbose", dest="verbose", action="store_true", help="verbose log output", default=False)
  parser.add_option("-e", "--expected-hostname", dest="expected_hostname", action="store",
                    help="expected hostname of current host. If hostname differs, agent will fail", default=None)
  (options, args) = parser.parse_args()

  expected_hostname = options.expected_hostname

  current_user = getpass.getuser()
  
  logging_level = logging.DEBUG if options.verbose else logging.INFO
  setup_logging(logger, AmbariConfig.AmbariConfig.getLogFile(), logging_level)
  setup_logging(alerts_logger, AmbariConfig.AmbariConfig.getAlertsLogFile(), logging_level)
  Logger.initialize_logger('resource_management', logging_level=logging_level)

  default_cfg = {'agent': {'prefix': '/home/ambari'}}
  config.load(default_cfg)
  bind_signal_handlers(agentPid)

  if (len(sys.argv) > 1) and sys.argv[1] == 'stop':
    stop_agent()

  if (len(sys.argv) > 2) and sys.argv[1] == 'reset':
    reset_agent(sys.argv)

  # Check for ambari configuration file.
  resolve_ambari_config()
  
  # Add syslog handler based on the ambari config file
  add_syslog_handler(logger)

  # Starting data cleanup daemon
  data_cleaner = None
  if config.has_option('agent', 'data_cleanup_interval') and int(config.get('agent','data_cleanup_interval')) > 0:
    data_cleaner = DataCleaner(config)
    data_cleaner.start()

  perform_prestart_checks(expected_hostname)

  # Starting ping port listener
  try:
    #This acts as a single process machine-wide lock (albeit incomplete, since
    # we still need an extra file to track the Agent PID)
    ping_port_listener = PingPortListener(config)
  except Exception as ex:
    err_message = "Failed to start ping port listener of: " + str(ex)
    logger.error(err_message)
    sys.stderr.write(err_message)
    sys.exit(1)
  ping_port_listener.start()

  update_log_level(config)

  server_hostname = hostname.server_hostname(config)
  server_url = config.get_api_url()

  if not OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
    daemonize()

  try:
    server_ip = socket.gethostbyname(server_hostname)
    logger.info('Connecting to Ambari server at %s (%s)', server_url, server_ip)
  except socket.error:
    logger.warn("Unable to determine the IP address of the Ambari server '%s'", server_hostname)

  # Wait until server is reachable
  netutil = NetUtil(heartbeat_stop_callback)
  retries, connected = netutil.try_to_connect(server_url, -1, logger)
  # Ambari Agent was stopped using stop event
  if connected:
    # Launch Controller communication
    controller = Controller(config, heartbeat_stop_callback)
    controller.start()
    controller.join()
  if not OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
    ExitHelper.execute_cleanup()
    stop_agent()
  logger.info("finished")
Esempio n. 40
0
File: env.py Project: Julian/Great
from contextlib import closing
import logging.config

from alembic import context
from sqlalchemy import pool

from great import config
from great.models.core import METADATA
from great.web import engine_from_config

CONFIG = config.load()
CONTEXT_CONFIG = dict(
    sqlalchemy_module_prefix="sqlalchemy.",
    target_metadata=METADATA,
)
logging.config.fileConfig(context.config.config_file_name)


def run_migrations_offline(config):
    """
    Run migrations in 'offline' mode.

    This configures the context with just a URL and not an Engine,
    though an Engine is acceptable here as well. By skipping the Engine
    creation we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the script
    output.

    """
Esempio n. 41
0
async def sappservice(config_filename, loop):
    config = Config(config_filename, "", "")
    config.load()

    logging.config.dictConfig(copy.deepcopy(config["logging"]))

    log: logging.Logger = logging.getLogger("sappservice")

    log.info("Initializing matrix spring lobby appservice")
    log.info(f"Config file: {config_filename}")

    # def exception_hook(etype, value, trace):
    #     log.debug(traceback.format_exception(etype, value, trace))
    #
    # sys.excepthook = exception_hook

    ################
    #
    # Initialization
    #
    ################

    mebibyte = 1024**2

    server = config["homeserver.address"]
    domain = config["homeserver.domain"]
    verify_ssl = config["homeserver.verify_ssl"]

    as_token = config["appservice.as_token"]
    hs_token = config["appservice.hs_token"]

    bot_localpart = config["appservice.bot_username"]
    max_body_size = config["appservice.max_body_size"]

    hostname = config["appservice.hostname"]
    port = config["appservice.port"]
    client_name = config["spring.client_name"]
    rooms = config["bridge.rooms"]

    upgrade_table = UpgradeTable()

    db = PostgresDatabase(config["appservice.database"], upgrade_table)
    await db.start()

    state_store_db = PgASStateStore(db=db)
    await state_store_db.upgrade_table.upgrade(db.pool)

    appserv = AppService(
        server=server,
        domain=domain,
        verify_ssl=verify_ssl,
        as_token=as_token,
        hs_token=hs_token,
        bot_localpart=bot_localpart,
        loop=loop,
        id='appservice',
        state_store=state_store_db,
        aiohttp_params={"client_max_size": max_body_size * mebibyte})

    spring_lobby_client = SpringLobbyClient(appserv, config, loop=loop)

    await db.start()
    await appserv.start(hostname, port)
    await spring_lobby_client.start()

    ################
    #
    # Lobby events
    #
    ################

    @spring_lobby_client.bot.on("tasserver")
    async def on_lobby_tasserver(message):
        log.debug(f"on_lobby_tasserver {message}")
        if message.client.name == client_name:
            message.client._login()

    @spring_lobby_client.bot.on("clients")
    async def on_lobby_clients(message):
        log.debug(f"on_lobby_clients {message}")
        if message.client.name != client_name:
            channel = message.params[0]
            clients = message.params[1:]
            await spring_lobby_client.join_matrix_room(channel, clients)

    @spring_lobby_client.bot.on("joined")
    async def on_lobby_joined(message, user, channel):
        log.debug(f"LOBBY JOINED user: {user.username} room: {channel}")
        if user.username != "appservice":
            await spring_lobby_client.join_matrix_room(channel,
                                                       [user.username])

    @spring_lobby_client.bot.on("left")
    async def on_lobby_left(message, user, channel):
        log.debug(f"LOBBY LEFT user: {user.username} room: {channel}")

        if channel.startswith("__battle__"):
            return

        if user.username == "appservice":
            return

        await spring_lobby_client.leave_matrix_room(channel, [user.username])

    @spring_lobby_client.bot.on("said")
    async def on_lobby_said(message, user, target, text):
        if message.client.name == client_name:
            await spring_lobby_client.said(user, target, text)

    @spring_lobby_client.bot.on("saidex")
    async def on_lobby_saidex(message, user, target, text):
        if message.client.name == client_name:
            await spring_lobby_client.saidex(user, target, text)

    # @spring_lobby_client.bot.on("denied")
    # async def on_lobby_denied(message):
    #     return
    #     # if message.client.name != client_name:
    #     #    user = message.client.name
    #     #    await spring_appservice.register(user)

    # @spring_lobby_client.bot.on("adduser")
    # async def on_lobby_adduser(message):
    #     if message.client.name != client_name:
    #         username = message.params[0]
    #
    #         if username == "ChanServ":
    #             return
    #         if username == "appservice":
    #             return
    #
    #         await spring_lobby_client.login_matrix_account(username)

    # @spring_lobby_client.bot.on("removeuser")
    # async def on_lobby_removeuser(message):
    #     if message.client.name != client_name:
    #         username = message.params[0]
    #
    #         if username == "ChanServ":
    #             return
    #         if username == "appservice":
    #             return
    #
    #         await spring_lobby_client.logout_matrix_account(username)

    @spring_lobby_client.bot.on("accepted")
    async def on_lobby_accepted(message):
        log.debug(f"message Accepted {message}")
        await spring_lobby_client.config_rooms()
        await spring_lobby_client.sync_matrix_users()

    @spring_lobby_client.bot.on("failed")
    async def on_lobby_failed(message):
        log.debug(f"message FAILED {message}")

    matrix = Matrix(appserv, spring_lobby_client, config)

    appserv.matrix_event_handler(matrix.handle_event)

    await matrix.wait_for_connection()
    await matrix.init_as_bot()

    # appservice_account = await appserv.intent.whoami()
    # user = appserv.intent.user(appservice_account)

    await appserv.intent.set_presence(PresenceState.ONLINE)

    # location = config["homeserver"]["domain"].split(".")[0]
    # external_id = "MatrixAppService"
    # external_username = config["appservice"]["bot_username"].split("_")[1]

    # for room in rooms:
    #
    #     enabled = config["bridge.rooms"][room]["enabled"]
    #     room_id = config["bridge.rooms"][room]["room_id"]
    #     room_alias = f"{config['appservice.namespace']}_{room}"
    #
    #     if enabled is True:
    #         await user.ensure_joined(room_id=room_id)
    #         await appserv.intent.add_room_alias(room_id=RoomID(room_id), alias_localpart=room_alias, override=True)
    #     # else:
    #     #     # await appserv.intent.remove_room_alias(alias_localpart=room_alias)
    #     #     try:
    #     #         await user.leave_room(room_id=room_id)
    #     #     except Exception as e:
    #     #         log.debug(f"Failed to leave room, not previously joined: {e}")

    appserv.ready = True
    log.info("Initialization complete, running startup actions")

    for signame in ('SIGINT', 'SIGTERM'):
        loop.add_signal_handler(
            getattr(signal, signame),
            lambda: asyncio.ensure_future(spring_lobby_client.exit(signame)))
Esempio n. 42
0
        if  logger           != "" : self.logger          = logger
        #runtime
        if  debug_level            : self.debug_level     = debug_level
        if  sock_timeout           : self.sock_timeout    = sock_timeout

        for group in groups_list:
            self.groups.append(Group(group,self.parser))

        for work in works_list:
            self.works.append(Works(work,self.parser))

        return self

class TaskServerLog:
    @staticmethod
    def get_logger(task_ser_config):
        vavava.util.assure_path("./log")
        logging.config.fileConfig(task_ser_config.log_config_file)
        return logging.getLogger(task_ser_config.logger)



if __name__ == "__main__":

    config = TaskServerConfig()
    config.load("task_server.conf")
    log = TaskServerLog.get_logger(config)
    log.debug("testing ...............")


Esempio n. 43
0
                    "--config",
                    type=str,
                    default="config.yaml",
                    metavar="<path>",
                    help="the path to your config file")
parser.add_argument("-b",
                    "--base-config",
                    type=str,
                    default="example-config.yaml",
                    metavar="<path>",
                    help="the path to the example config "
                    "(for automatic config updates)")
args = parser.parse_args()

config = Config(args.config, args.base_config)
config.load()
config.update()

logging.config.dictConfig(copy.deepcopy(config["logging"]))
init_log_listener()
log = logging.getLogger("maubot.init")
log.info(f"Initializing maubot {__version__}")

loop = asyncio.get_event_loop()

init_zip_loader(config)
db_session = init_db(config)
clients = init_client_class(db_session, loop)
plugins = init_plugin_instance_class(db_session, config, loop)
management_api = init_mgmt_api(config, loop)
server = MaubotServer(config, loop)
Esempio n. 44
0
def main(heartbeat_stop_callback=None):
    global config
    global home_dir

    parser = OptionParser()
    parser.add_option("-v",
                      "--verbose",
                      dest="verbose",
                      action="store_true",
                      help="verbose log output",
                      default=False)
    parser.add_option(
        "-e",
        "--expected-hostname",
        dest="expected_hostname",
        action="store",
        help=
        "expected hostname of current host. If hostname differs, agent will fail",
        default=None)
    parser.add_option("--home",
                      dest="home_dir",
                      action="store",
                      help="Home directory",
                      default="")
    (options, args) = parser.parse_args()

    expected_hostname = options.expected_hostname
    home_dir = options.home_dir

    logging_level = logging.DEBUG if options.verbose else logging.INFO

    setup_logging(logger, AmbariConfig.AmbariConfig.getLogFile(),
                  logging_level)
    global is_logger_setup
    is_logger_setup = True
    setup_logging(alerts_logger, AmbariConfig.AmbariConfig.getAlertsLogFile(),
                  logging_level)
    Logger.initialize_logger('resource_management',
                             logging_level=logging_level)

    if home_dir != "":
        # When running multiple Ambari Agents on this host for simulation, each one will use a unique home directory.
        Logger.info("Agent is using Home Dir: %s" % str(home_dir))

    # use the host's locale for numeric formatting
    try:
        locale.setlocale(locale.LC_ALL, '')
    except locale.Error as ex:
        logger.warning(
            "Cannot set locale for ambari-agent. Please check your systemwide locale settings. Failed due to: {0}."
            .format(str(ex)))

    default_cfg = {'agent': {'prefix': '/home/ambari'}}
    config.load(default_cfg)

    if (len(sys.argv) > 1) and sys.argv[1] == 'stop':
        stop_agent()

    if (len(sys.argv) > 2) and sys.argv[1] == 'reset':
        reset_agent(sys.argv)

    # Check for ambari configuration file.
    resolve_ambari_config()

    # Add syslog handler based on the ambari config file
    add_syslog_handler(logger)

    # Starting data cleanup daemon
    data_cleaner = None
    if config.has_option('agent', 'data_cleanup_interval') and int(
            config.get('agent', 'data_cleanup_interval')) > 0:
        data_cleaner = DataCleaner(config)
        data_cleaner.start()

    perform_prestart_checks(expected_hostname)

    # Starting ping port listener
    try:
        #This acts as a single process machine-wide lock (albeit incomplete, since
        # we still need an extra file to track the Agent PID)
        ping_port_listener = PingPortListener(config)
    except Exception as ex:
        err_message = "Failed to start ping port listener of: " + str(ex)
        logger.error(err_message)
        sys.stderr.write(err_message)
        sys.exit(1)
    ping_port_listener.start()

    update_log_level(config)

    update_open_files_ulimit(config)

    if not config.use_system_proxy_setting():
        logger.info('Agent is configured to ignore system proxy settings')
        reconfigure_urllib2_opener(ignore_system_proxy=True)

    if not OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
        daemonize()

    #
    # Iterate through the list of server hostnames and connect to the first active server
    #

    active_server = None
    server_hostnames = hostname.server_hostnames(config)

    connected = False
    stopped = False

    # Keep trying to connect to a server or bail out if ambari-agent was stopped
    while not connected and not stopped:
        for server_hostname in server_hostnames:
            server_url = config.get_api_url(server_hostname)
            try:
                server_ip = socket.gethostbyname(server_hostname)
                logger.info('Connecting to Ambari server at %s (%s)',
                            server_url, server_ip)
            except socket.error:
                logger.warn(
                    "Unable to determine the IP address of the Ambari server '%s'",
                    server_hostname)

            # Wait until MAX_RETRIES to see if server is reachable
            netutil = NetUtil(config, heartbeat_stop_callback)
            (retries, connected,
             stopped) = netutil.try_to_connect(server_url, MAX_RETRIES, logger)

            # if connected, launch controller
            if connected:
                logger.info('Connected to Ambari server %s', server_hostname)
                # Set the active server
                active_server = server_hostname
                # Launch Controller communication
                run_threads(server_hostname, heartbeat_stop_callback)

            #
            # If Ambari Agent connected to the server or
            # Ambari Agent was stopped using stop event
            # Clean up if not Windows OS
            #
            if connected or stopped:
                ExitHelper().exit(0)
                logger.info("finished")
                break
        pass  # for server_hostname in server_hostnames
    pass  # while not (connected or stopped)

    return active_server
Esempio n. 45
0
'''
Created on Nov 12, 2018

@author: colin
'''
#===============================================================================
# Setup  Logging
#===============================================================================
import logging
import logging.config
logger = logging.getLogger('Main')

LEVEL = 'INFO'

logging.basicConfig(format='%(asctime)s %(name)s:%(levelname)s:%(message)s',
                    datefmt='%m/%d/%Y %I:%M:%S %p',
                    level=LEVEL)

from CardStack import CardViewer
from CardStack import CardConfig

config = CardConfig('config')
message = config.info()
load = config.load(
    '/Users/colin/Documents/eclipse-workspace/JobCard3/config/config.yaml')
write = config.write('/tmp/output.yaml')

print(config.keys())
build = config.build()