Example #1
def test_notify_no_socket():
    assert notify("READY=1") == False
    with skip_enosys():
        assert notify("FDSTORE=1", fds=[]) == False
    assert notify("FDSTORE=1", fds=[1, 2]) == False
    assert notify("FDSTORE=1", pid=os.getpid()) == False
    assert notify("FDSTORE=1", pid=os.getpid(), fds=(1,)) == False
Example #2
    def __init__(self) -> None:
        LOGGER.info(f"Starting v{__version__}.")

        self.controller = Controller(loop, bus)
        self.udisks_manager = UDisksManager(bus, self.controller)

        try:
            # Publish our controller on the bus.
            self.controller_object = bus.publish("uk.org.j5.pepper2",
                                                 self.controller)
        except RuntimeError as e:
            if str(e) == "name already exists on the bus":
                LOGGER.error("pepperd is already running.")
                notify("STOPPING=1")
                exit(1)
            else:
                raise

        self.disk_signal_handler = bus.get(".UDisks2").InterfacesAdded.connect(
            self.udisks_manager.disk_signal, )

        # Shutdown gracefully
        signal(SIGHUP, self._signal_stop)
        signal(SIGINT, self._signal_stop)
        signal(SIGTERM, self._signal_stop)

        self.udisks_manager.detect_initial_drives()

        if self.controller.daemon_status is DaemonStatus.STARTING:
            # Only change the status if a usercode hasn't started.
            self.controller.daemon_status = DaemonStatus.READY

        notify("READY=1")
        LOGGER.info(f"Ready.")
Example #3
def main() -> int:

    cfg = UAConfig()
    setup_logging(logging.INFO,
                  logging.DEBUG,
                  log_file=cfg.daemon_log_file,
                  logger=LOG)
    # The ua-daemon logger should log everything to its file
    # Make sure the ua-daemon logger does not generate double logging
    # by propagating to the root logger
    LOG.propagate = False
    # The root logger should only log errors to the daemon log file
    setup_logging(
        logging.CRITICAL,
        logging.ERROR,
        log_file=cfg.daemon_log_file,
        logger=logging.getLogger(),
    )

    LOG.debug("daemon starting")

    notify("READY=1")

    daemon.poll_for_pro_license(cfg)

    LOG.debug("daemon ending")
    return 0
Example #4
    def send_notification(self):
        try:
            from systemd.daemon import notify
            event = threading.Event()

            # send first notification on init
            logger.debug('[Watchdog]... everything is ok')
            notify('WATCHDOG=1')

            while not event.wait(self.interval - 1):
                main_thread_alive = threading.main_thread().is_alive()
                logger.debug(
                    '[Watchdog] is alive {}'.format(main_thread_alive))
                if main_thread_alive:
                    logger.debug('[Watchdog]...')
                    url = settings.config_http['bind']
                    resp = requests.get(url)
                    if resp.status_code == 200:
                        logger.debug('[Watchdog] everything is ok')
                        notify('WATCHDOG=1')
                    else:
                        logger.warning(
                            f'[Watchdog] Watchdog not sent. Response status: {resp.status_code}; '
                            f'Response: {resp.__dict__}')
                else:
                    logger.critical(f'[Watchdog] Main thread is not alive.')
        except (KeyError, TypeError, ValueError):
            logger.info('[Watchdog] not enabled, keep_alive missing')
        except ImportError:
            logger.warn('[Watchdog] systemd not imported {}'.format(
                traceback.format_exc(limit=5)))
        except:
            logger.alert('[Watchdog] Unexpected exception {}'.format(
                traceback.format_exc(limit=5)))
Example #5
def _watchdog_task(shared_state, log):
    """
    Continuously ping the watchdog to tell him that we're still alive.

    This will keep running until the parent thread dies, or
    shared_state.system_ready gets set to False by someone.
    """
    watchdog_timeout = \
            float(os.environ.get(
                'WATCHDOG_USEC',
                MPM_WATCHDOG_DEFAULT_TIMEOUT
            )) / 1e6
    watchdog_interval = watchdog_timeout / MPM_WATCHDOG_TIMEOUT_FRAC
    daemon.notify("READY=1")
    log.debug("Watchdog primed, going into watchdog loop (Interval: %s s)",
              watchdog_interval)
    while shared_state.system_ready.value:
        # Sleep first, then ping, that avoids the case where transfer_control()
        # is not yet complete before we call this for the first time, which
        # would lead in error messages popping up in the systemd journal.
        time.sleep(watchdog_interval)
        log.trace("Pinging watchdog....")
        daemon.notify("WATCHDOG=1")
    log.error("Terminating watchdog thread!")
    return
Example #6
    def send_notification(self):
        try:
            from systemd.daemon import notify
            event = threading.Event()

            while not event.wait(self.interval - 1):
                main_thread_alive = threading.main_thread().is_alive()
                if main_thread_alive:

                    if ('last_event_ms' in uwsgi.opt
                            and time.time() - uwsgi.opt['last_event_ms'] <= self.interval
                            or self._test_http()):
                        log.verbose('[{}] Watchdog sent successfully'.format(
                            log.style.apply('OK', log.style.GREEN_FG)))
                        notify('WATCHDOG=1')

        except (KeyError, TypeError, ValueError) as e:
            log.error('[{}] Error {}'.format(
                log.style.apply('Watchdog', log.style.RED_FG), e))
        except ImportError:
            log.warning('[{}] systemd not imported {}'.format(
                log.style.apply('Watchdog', log.style.RED_FG),
                traceback.format_exc(limit=5)))
        except:
            log.alert('[{}] Unexpected exception {}'.format(
                log.style.apply('Watchdog', log.style.RED_FG),
                traceback.format_exc(limit=5)))
Example #7
def main(args):
    """It is a main.

    arguments are passed in via env vars currently;
        WATCHDOG_USEC = 3000000
    """
    server = setup(args.CONFIGFILE, args.YEAR)
    rgb = StarCamera(args.CAM)
    ir = science_camera(args.CAM)

    CONNECTIONS = {}

    epoll = select.epoll()
    epoll.register(server.fileno(), select.EPOLLIN)

    try:
        connection(sys.stdin, epoll)
    except IOError:
        pass

    daemon.notify("WATCHDOG=1")
    last_ping = time()
    while True:
        # systemd watchdog
        events = epoll.poll(
            float(os.environ['WATCHDOG_USEC']) / 2.0e6 - (time() - last_ping))
        if (len(events) == 0 or time() >=
            (last_ping + float(os.environ['WATCHDOG_USEC']) / 2.0e6)):
            daemon.notify("WATCHDOG=1")
            last_ping = time()
        # events = epoll.poll()
        for fd, event_type in events:
            # Activity on the master socket means a new connection.
            if fd == server.fileno():
                conn, addr = server.accept()
                connection(conn, epoll)
            elif fd in CONNECTIONS:
                w = CONNECTIONS[fd]
                data = w.read()
                print(data.decode(encoding='UTF-8'), file=sys.stderr)
                if len(data) > 0:
                    if sys.version_info[0] > 2:
                        stdout_redir = StringIO()
                    else:
                        stdout_redir = BytesIO()
                    stdout_old = sys.stdout
                    sys.stdout = stdout_redir
                    try:
                        exec(data)
                    except SystemExit:
                        w.close()
                        raise
                    except:
                        traceback.print_exc(file=sys.stdout)
                    sys.stdout = stdout_old
                    data_out = stdout_redir.getvalue()
                    print(data_out, file=sys.stderr)
                    w.write(data_out)
                else:
                    w.close()
Example #8
def test_notify_no_socket():
    assert notify('READY=1') is False
    with skip_enosys():
        assert notify('FDSTORE=1', fds=[]) is False
    assert notify('FDSTORE=1', fds=[1, 2]) is False
    assert notify('FDSTORE=1', pid=os.getpid()) is False
    assert notify('FDSTORE=1', pid=os.getpid(), fds=(1,)) is False
Example #9
def when_ready(server):
    try:
        from systemd.daemon import notify

        notify("READY=1")
    except ImportError:
        pass
Example #10
def notify_ready():
    """
    Use systemd notify protocol, or upstart sigstop, to notify
    readiness of the nova-agent.
    """

    global _ready

    if _ready:
        return

    # PyPI edition of python-systemd
    try:
        from systemd.daemon import notify, Notification
        notify(Notification.READY)
    # Older v234 python-systemd
    except ImportError:
        try:
            from systemd.daemon import notify
            notify('READY=1')
        except ImportError:
            pass

    # Upstart notification type
    if os.environ.get('UPSTART_JOB'):
        import signal
        os.kill(os.getpid(), signal.SIGSTOP)

    _ready = True
Example #11
def main(argv):
    tf.logging.set_verbosity(tf.logging.INFO)
    np.random.seed(42)

    config = ServerConfig.load((FLAGS.config_file, ))

    if sys.version_info >= (3, 6):  # thread_name_prefix requires Python 3.6+
        thread_pool = ThreadPoolExecutor(thread_name_prefix='query-thread-')
    else:
        thread_pool = ThreadPoolExecutor(max_workers=32)
    tokenizer_service = TokenizerService()
    app = Application(config, thread_pool, tokenizer_service)

    if config.ssl_key:
        ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        ssl_ctx.load_cert_chain(config.ssl_chain, config.ssl_key)
        app.listen(config.port, ssl_options=ssl_ctx)
    else:
        app.listen(config.port)

    if config.user:
        os.setgid(grp.getgrnam(config.user)[2])
        os.setuid(pwd.getpwnam(config.user)[2])

    if sd:
        sd.notify('READY=1')

    tokenizer_service.run()
    app.load_all_languages()

    sys.stdout.flush()
    tornado.ioloop.IOLoop.current().start()
Example #12
    def __init__(self):
        # Enable logging
        logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                            level=logging.INFO)

        self.logger = logging.getLogger(__name__)

        # Systemd journal handler
        self.logger.addHandler(journal.JournaldLogHandler())

        # Initialize engine
        self.engine = engine.Engine()

        self.QUESTION, self.SELECTQUESTION, self.SUPPORTCONFIRM, self.SUPPORTSUBMIT, self.SUPPORT = range(5)

        # Create the Updater and pass it your bot's token.
        # Make sure to set use_context=True to use the new context based callbacks
        # Post version 12 this will no longer be necessary

        # Get the token here
        _pwd = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(_pwd, "../api.token"), 'r') as f:
            botToken = f.readlines()[0].strip()

        self.updater = Updater(botToken, use_context=True)

        # Get the dispatcher to register handlers
        self.dp = self.updater.dispatcher

        # Add conversation handler with states
        self.conv_handler = ConversationHandler(
            entry_points=[CommandHandler('start', self.start)],

            states={
                self.QUESTION:          [MessageHandler(Filters.text, self.question)],
                self.SELECTQUESTION:    [MessageHandler(Filters.text, self.selectQuestion)],
                self.SUPPORTCONFIRM:    [MessageHandler(Filters.regex('^(Yes|No)$'), self.supportConfirm), CommandHandler('skip', self.skip_support)],
                self.SUPPORTSUBMIT:     [MessageHandler(Filters.text, self.supportSubmit)]
            },

            fallbacks=[CommandHandler('support', self.support), CommandHandler('cancel', self.cancel), CommandHandler('help', self.help)]
        )

        self.dp.add_handler(self.conv_handler)

        # log all errors
        self.dp.add_error_handler(self.error)

        # Start the Bot
        self.updater.start_polling()
        print("Ready")
        notify(Notification.READY)

        # Run the bot until you press Ctrl-C or the process receives SIGINT,
        # SIGTERM or SIGABRT. This should be used most of the time, since
        # start_polling() is non-blocking and will stop the bot gracefully.
        self.updater.idle()
        notify(Notification.STOPPING)
        print("Ended")
Example #13
    def _notify_systemd(self):
        if self.systemd_notified:
            return

        if daemon:
            daemon.notify("READY=1")

        self.systemd_notified = True
Example #14
def test_notify_no_socket():
    os.environ.pop('NOTIFY_SOCKET', None)

    assert notify('READY=1') is False
    with skip_enosys():
        assert notify('FDSTORE=1', fds=[]) is False
    assert notify('FDSTORE=1', fds=[1, 2]) is False
    assert notify('FDSTORE=1', pid=os.getpid()) is False
    assert notify('FDSTORE=1', pid=os.getpid(), fds=(1, )) is False
Example #15
    def stop(self):
        self.status('Stopping ...')

        # Stop the pigpio connection.
        self._pi.stop()

        # Mark  the daemon as stopping.
        daemon.notify(daemon.Notification.STOPPING)
        self._running = False
Example #16
    def run(self):
        self.running = True
        pidfile = "/tmp/ips-nodefacade.pid"
        file(pidfile, 'w').write(str(getpid()))
        self.start()
        daemon.notify("READY=1")
        while self.running:
            self.registry.update_ptp()
            time.sleep(1)
        os.unlink(pidfile)
        self._cleanup()
Example #17
def _systemd_notify_ready():
    if not os.environ.get("NOTIFY_SOCKET"):
        return
    try:
        from systemd import daemon  # pylint: disable=no-name-in-module,import-outside-toplevel
        daemon.notify("READY=1")
    except ImportError:
        logger.warning(
            "Running under systemd but python-systemd not available, attempting systemd notify via utility"
        )
        subprocess.run(["systemd-notify", "--ready"], check=True)
Example #18
def process_replay_objects(all_objects: Dict[str, List[BaseModel]], *,
                           storage: StorageInterface) -> None:
    for (object_type, objects) in all_objects.items():
        logger.debug("Inserting %s %s objects", len(objects), object_type)
        with statsd.timed(GRAPH_DURATION_METRIC,
                          tags={"object_type": object_type}):
            _insert_objects(object_type, objects, storage)
        statsd.increment(GRAPH_OPERATIONS_METRIC,
                         len(objects),
                         tags={"object_type": object_type})
    if notify:
        notify("WATCHDOG=1")
Example #19
def test_daemon_notify_memleak():
    # https://github.com/systemd/python-systemd/pull/51
    fd = 1
    fds = [fd]
    ref_cnt = sys.getrefcount(fd)

    try:
        notify('', True, 0, fds)
    except ConnectionRefusedError:
        pass

    assert sys.getrefcount(fd) <= ref_cnt, 'leak'
Example #20
def test_notify_with_socket(tmpdir):
    path = tmpdir.join('socket').strpath
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    sock.bind(path)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_PASSCRED, 1)
    os.environ['NOTIFY_SOCKET'] = path

    assert notify('READY=1') == True
    assert notify('FDSTORE=1', fds=[]) == True
    assert notify('FDSTORE=1', fds=[1,2]) == True
    assert notify('FDSTORE=1', pid=os.getpid()) == True
    assert notify('FDSTORE=1', pid=os.getpid(), fds=(1,)) == True
Example #21
def test_notify_with_socket(tmpdir):
    path = tmpdir.join('socket').strpath
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    sock.bind(path)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_PASSCRED, 1)
    os.environ['NOTIFY_SOCKET'] = path

    assert notify('READY=1') == True
    assert notify('FDSTORE=1', fds=[]) == True
    assert notify('FDSTORE=1', fds=[1, 2]) == True
    assert notify('FDSTORE=1', pid=os.getpid()) == True
    assert notify('FDSTORE=1', pid=os.getpid(), fds=(1, )) == True
Example #22
    def __init__(self, config_path):
        self.log = logging.getLogger("pghoard")
        self.log_level = None
        self.running = True
        self.config_path = config_path
        self.compression_queue = Queue()
        self.transfer_queue = Queue()
        self.syslog_handler = None
        self.config = {}
        self.site_transfers = {}
        self.state = {
            "backup_sites": {},
            "startup_time": datetime.datetime.utcnow().isoformat(),
        }
        self.load_config()

        if not os.path.exists(self.config["backup_location"]):
            os.makedirs(self.config["backup_location"])

        signal.signal(signal.SIGHUP, self.load_config)
        signal.signal(signal.SIGINT, self.quit)
        signal.signal(signal.SIGTERM, self.quit)
        self.time_of_last_backup = {}
        self.time_of_last_backup_check = {}
        self.basebackups = {}
        self.basebackups_callbacks = {}
        self.receivexlogs = {}
        self.compressors = []
        self.transfer_agents = []
        self.requested_basebackup_sites = set()

        self.inotify = InotifyWatcher(self.compression_queue)
        self.webserver = WebServer(self.config,
                                   self.requested_basebackup_sites,
                                   self.compression_queue, self.transfer_queue)

        for _ in range(self.config["compression"]["thread_count"]):
            compressor = CompressorThread(self.config, self.compression_queue,
                                          self.transfer_queue)
            self.compressors.append(compressor)

        for _ in range(self.config["transfer"]["thread_count"]):
            ta = TransferAgent(self.config, self.compression_queue,
                               self.transfer_queue)
            self.transfer_agents.append(ta)

        if daemon:  # If we can import systemd we always notify it
            daemon.notify("READY=1")
            self.log.debug(
                "Sent startup notification to systemd that pghoard is READY")
        self.log.info("pghoard initialized, own_hostname: %r, cwd: %r",
                      socket.gethostname(), os.getcwd())
Example #23
    def __init__(self, config_path):
        self.log = logging.getLogger("pglookout")
        self.running = True
        self.replication_lag_over_warning_limit = False

        self.config_path = config_path
        self.config = {}
        self.log_level = "DEBUG"

        self.connected_master_nodes = {}
        self.disconnected_master_nodes = {}
        self.connected_observer_nodes = {}
        self.disconnected_observer_nodes = {}
        self.replication_lag_warning_boundary = None
        self.replication_lag_failover_timeout = None
        self.own_db = None
        self.current_master = None
        self.failover_command = None
        self.over_warning_limit_command = None
        self.never_promote_these_nodes = None
        self.cluster_monitor = None
        self.syslog_handler = None
        self.load_config()

        signal.signal(signal.SIGHUP, self.load_config)
        signal.signal(signal.SIGINT, self.quit)
        signal.signal(signal.SIGTERM, self.quit)

        self.cluster_state = {}
        self.observer_state = {}
        self.overall_state = {
            "db_nodes": self.cluster_state,
            "observer_nodes": self.observer_state,
            "current_master": self.current_master,
            "replication_lag_over_warning":
            self.replication_lag_over_warning_limit
        }

        self.cluster_monitor = ClusterMonitor(self.config, self.cluster_state,
                                              self.observer_state,
                                              self.create_alert_file)
        # cluster_monitor doesn't exist at the time of reading the config initially
        self.cluster_monitor.log.setLevel(self.log_level)
        self.webserver = WebServer(self.config, self.cluster_state)

        if daemon:  # If we can import systemd we always notify it
            daemon.notify("READY=1")
            self.log.info(
                "Sent startup notification to systemd that pglookout is READY")
        self.log.info(
            "PGLookout initialized, own_hostname: %r, own_db: %r, cwd: %r",
            socket.gethostname(), self.own_db, os.getcwd())
Example #24
def test_notify_bad_socket():
    os.environ['NOTIFY_SOCKET'] = '/dev/null'

    with pytest.raises(connection_error):
        notify('READY=1')
    with pytest.raises(connection_error):
        notify('FDSTORE=1', fds=[])
    with pytest.raises(connection_error):
        notify('FDSTORE=1', fds=[1,2])
    with pytest.raises(connection_error):
        notify('FDSTORE=1', pid=os.getpid())
    with pytest.raises(connection_error):
        notify('FDSTORE=1', pid=os.getpid(), fds=(1,))
Example #25
async def main():
    if is_systemd:
        notify(Notification.READY)
    await client.get_dialogs()
    global fwd_channel
    fwd_channel = await client.get_entity(settings.forward_channel)

    while client.is_connected():
        if is_systemd:
            notify(Notification.STATUS, "I'm fine")
            await asyncio.sleep(10)
        else:
            await asyncio.sleep(3600)
Example #26
def test_notify_bad_socket():
    os.environ['NOTIFY_SOCKET'] = '/dev/null'

    with pytest.raises(connection_error):
        notify('READY=1')
    with pytest.raises(connection_error):
        notify('FDSTORE=1', fds=[])
    with pytest.raises(connection_error):
        notify('FDSTORE=1', fds=[1, 2])
    with pytest.raises(connection_error):
        notify('FDSTORE=1', pid=os.getpid())
    with pytest.raises(connection_error):
        notify('FDSTORE=1', pid=os.getpid(), fds=(1, ))
Example #27
    def run(self):
        self.running = True
        self.start()
        self.facade.register_service("http://" + HOST + ":" + str(PORT), "{}/{}/{}/".format(APINAMESPACE, APINAME, APIVERSION))
        daemon.notify("READY=1")
        itercount = 0
        while self.running:
            itercount += 1
            gevent.sleep(1)
            if itercount == 5:  # 5 seconds
                self.facade.heartbeat_service()
                itercount = 0
        self.facade.unregister_service()
        self._cleanup()
Example #28
def test_notify_bad_socket():
    os.environ["NOTIFY_SOCKET"] = "/dev/null"

    with pytest.raises(connection_error):
        notify("READY=1")
    with pytest.raises(connection_error):
        with skip_enosys():
            notify("FDSTORE=1", fds=[])
    with pytest.raises(connection_error):
        notify("FDSTORE=1", fds=[1, 2])
    with pytest.raises(connection_error):
        notify("FDSTORE=1", pid=os.getpid())
    with pytest.raises(connection_error):
        notify("FDSTORE=1", pid=os.getpid(), fds=(1,))
Example #29
    def __init__(self, baseurl, dir):
        # This matches a kernel id (uuid4 format) from a url
        _kernel_id_regex = r"(?P<kernel_id>\w+-\w+-\w+-\w+-\w+)"
        baseurl = baseurl.rstrip('/')
        handlers_list = [
            (r"/", handlers.RootHandler),
            (r"/embedded_sagecell.js", tornado.web.RedirectHandler, {
                "url": baseurl + "/static/embedded_sagecell.js"
            }),
            (r"/help.html", handlers.HelpHandler),
            (r"/kernel", handlers.KernelHandler),
            (r"/kernel/%s" % _kernel_id_regex, handlers.KernelHandler),
            (r"/kernel/%s/channels" % _kernel_id_regex,
             handlers.WebChannelsHandler),
            (r"/kernel/%s/files/(?P<file_path>.*)" % _kernel_id_regex,
             handlers.FileHandler, {
                 "path": dir
             }),
            (r"/permalink", permalink.PermalinkHandler),
            (r"/service", handlers.ServiceHandler),
            (r"/tos.html", handlers.TOSHandler),
        ] + handlers.KernelRouter.urls
        handlers_list = [[baseurl + i[0]] + list(i[1:]) for i in handlers_list]
        settings = dict(compress_response=True,
                        template_path=os.path.join(
                            os.path.dirname(os.path.abspath(__file__)),
                            "templates"),
                        static_path=os.path.join(
                            os.path.dirname(os.path.abspath(__file__)),
                            "static"),
                        static_url_prefix=baseurl + "/static/",
                        static_handler_class=handlers.StaticHandler)
        self.kernel_dealer = KernelDealer(config.get("provider_settings"))
        start_providers(self.kernel_dealer.port, config.get("providers"), dir)
        self.completer = handlers.Completer(self.kernel_dealer)
        db = __import__('db_' + config.get('db'))
        self.db = db.DB(config.get('db_config')['uri'])
        self.ioloop = tornado.ioloop.IOLoop.current()

        # to check for blocking when debugging, uncomment the following
        # and set the argument to the blocking timeout in seconds
        self.ioloop.set_blocking_log_threshold(.5)
        super(SageCellServer, self).__init__(handlers_list, **settings)
        logger.info('SageCell server started')
        try:
            from systemd.daemon import notify
            logger.debug('notifying systemd that we are ready')
            notify('READY=1\nMAINPID={}'.format(os.getpid()), True)
        except ImportError:
            pass
Example #30
    def _terminate(self, signal_number: int,
                   frame: Optional[FrameType]) -> None:
        logger = self._logger
        observer = self._observer
        current_pid = self._current_pid

        logger.info(
            f"Received terminating signal {signal_number}, stopping the observer..."
        )
        systemd_daemon.notify(constants.NOTIFICATION_STOPPING, pid=current_pid)

        if observer.is_alive():
            observer.stop()
            observer.join()
Example #31
    def __init__(self, config_path):
        self.log = logging.getLogger("pghoard")
        self.log_level = None
        self.running = True
        self.config_path = config_path
        self.compression_queue = Queue()
        self.transfer_queue = Queue()
        self.syslog_handler = None
        self.config = {}
        self.site_transfers = {}
        self.state = {
            "backup_sites": {},
            "startup_time": datetime.datetime.utcnow().isoformat(),
            }
        self.load_config()

        if not os.path.exists(self.config["backup_location"]):
            os.makedirs(self.config["backup_location"])

        signal.signal(signal.SIGHUP, self.load_config)
        signal.signal(signal.SIGINT, self.quit)
        signal.signal(signal.SIGTERM, self.quit)
        self.time_of_last_backup = {}
        self.time_of_last_backup_check = {}
        self.basebackups = {}
        self.basebackups_callbacks = {}
        self.receivexlogs = {}
        self.compressors = []
        self.transfer_agents = []
        self.requested_basebackup_sites = {}

        self.inotify = InotifyWatcher(self.compression_queue)
        self.webserver = WebServer(
            self.config,
            self.requested_basebackup_sites,
            self.compression_queue,
            self.transfer_queue)

        for _ in range(self.config["compression"]["thread_count"]):
            compressor = CompressorThread(self.config, self.compression_queue, self.transfer_queue)
            self.compressors.append(compressor)

        for _ in range(self.config["transfer"]["thread_count"]):
            ta = TransferAgent(self.config, self.compression_queue, self.transfer_queue)
            self.transfer_agents.append(ta)

        if daemon:  # If we can import systemd we always notify it
            daemon.notify("READY=1")
            self.log.info("Sent startup notification to systemd that pghoard is READY")
        self.log.info("pghoard initialized, own_hostname: %r, cwd: %r", socket.gethostname(), os.getcwd())
Example #32
    def __init__(self, config_path):
        self.log = logging.getLogger("pglookout")
        self.running = True
        self.replication_lag_over_warning_limit = False

        self.config_path = config_path
        self.config = {}
        self.log_level = "DEBUG"

        self.connected_master_nodes = {}
        self.disconnected_master_nodes = {}
        self.connected_observer_nodes = {}
        self.disconnected_observer_nodes = {}
        self.replication_lag_warning_boundary = None
        self.replication_lag_failover_timeout = None
        self.missing_master_from_config_timeout = None
        self.own_db = None
        self.current_master = None
        self.failover_command = None
        self.over_warning_limit_command = None
        self.never_promote_these_nodes = None
        self.primary_conninfo_template = None
        self.cluster_monitor = None
        self.syslog_handler = None
        self.cluster_nodes_change_time = time.time()
        self.trigger_check_queue = Queue()
        self.load_config()

        signal.signal(signal.SIGHUP, self.load_config)
        signal.signal(signal.SIGINT, self.quit)
        signal.signal(signal.SIGTERM, self.quit)

        self.cluster_state = {}
        self.observer_state = {}
        self.overall_state = {"db_nodes": self.cluster_state, "observer_nodes": self.observer_state,
                              "current_master": self.current_master,
                              "replication_lag_over_warning": self.replication_lag_over_warning_limit}

        self.cluster_monitor = ClusterMonitor(self.config, self.cluster_state,
                                              self.observer_state, self.create_alert_file,
                                              trigger_check_queue=self.trigger_check_queue)
        # cluster_monitor doesn't exist at the time of reading the config initially
        self.cluster_monitor.log.setLevel(self.log_level)
        self.webserver = WebServer(self.config, self.cluster_state)

        if daemon:  # If we can import systemd we always notify it
            daemon.notify("READY=1")
            self.log.info("Sent startup notification to systemd that pglookout is READY")
        self.log.info("PGLookout initialized, local hostname: %r, own_db: %r, cwd: %r",
                      socket.gethostname(), self.own_db, os.getcwd())
Example #33
    async def on_ready(self):
        """|coro|

        Called when bot is ready.
        """
        if self.running:  # Used to prevent on_ready being run during reconnect
            return
        self.running = True
        await self.load_cogs()
        await self.set_status(self.config['CLIENT']['status'])  # Set Discord Status
        await self.load_scheds()
        notify(Notification.READY)  # Sends systemd notification
        print('Logged in as {0.user}'.format(self.bot) + '!\n', flush=True)
Example #34
    def stop(self) -> None:
        """Stop the daemon."""
        notify("STOPPING=1")
        LOGGER.info("Stopping.")

        # Disconnect from D-Bus
        self.disk_signal_handler.disconnect()
        self.controller_object.unpublish()
        sleep(0.3)  # Wait, just in case usercode has only just started.

        if self.controller.usercode_driver is not None:
            self.controller.usercode_driver.stop_execution()

        loop.quit()
        LOGGER.info("Stopped.")
Example #35
def watch(path):
    signal.signal(signal.SIGTERM, termed)
    event_handler = MyHandler()
    observer = Observer()
    observer.schedule(event_handler, path, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    finally:
        observer.stop()
        notify("STOPPING=1")
    observer.join()
Example #36
def test_notify_with_socket(tmpdir):
    path = tmpdir.join("socket").strpath
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    sock.bind(path)
    # SO_PASSCRED is not defined in python2.7
    SO_PASSCRED = getattr(socket, "SO_PASSCRED", 16)
    sock.setsockopt(socket.SOL_SOCKET, SO_PASSCRED, 1)
    os.environ["NOTIFY_SOCKET"] = path

    assert notify("READY=1") == True
    with skip_enosys():
        assert notify("FDSTORE=1", fds=[]) == True
    assert notify("FDSTORE=1", fds=[1, 2]) == True
    assert notify("FDSTORE=1", pid=os.getpid()) == True
    assert notify("FDSTORE=1", pid=os.getpid(), fds=(1,)) == True
Example #37
def test_notify_with_socket(tmpdir):
    path = tmpdir.join('socket').strpath
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    sock.bind(path)
    # SO_PASSCRED is not defined in python2.7
    SO_PASSCRED = getattr(socket, 'SO_PASSCRED', 16)
    sock.setsockopt(socket.SOL_SOCKET, SO_PASSCRED, 1)
    os.environ['NOTIFY_SOCKET'] = path

    assert notify('READY=1') == True
    with skip_enosys():
        assert notify('FDSTORE=1', fds=[]) == True
    assert notify('FDSTORE=1', fds=[1, 2]) == True
    assert notify('FDSTORE=1', pid=os.getpid()) == True
    assert notify('FDSTORE=1', pid=os.getpid(), fds=(1, )) == True
Example #38
    def __init__(self, baseurl, dir):
        # This matches a kernel id (uuid4 format) from a url
        _kernel_id_regex = r"(?P<kernel_id>\w+-\w+-\w+-\w+-\w+)"
        baseurl = baseurl.rstrip('/')
        handlers_list = [
            (r"/", handlers.RootHandler),
            (r"/embedded_sagecell.js",
             tornado.web.RedirectHandler,
             {"url":baseurl+"/static/embedded_sagecell.js"}),
            (r"/help.html", handlers.HelpHandler),
            (r"/kernel", handlers.KernelHandler),
            (r"/kernel/%s" % _kernel_id_regex, handlers.KernelHandler),
            (r"/kernel/%s/channels" % _kernel_id_regex,
             handlers.WebChannelsHandler),
            (r"/kernel/%s/files/(?P<file_path>.*)" % _kernel_id_regex,
             handlers.FileHandler, {"path": dir}),
            (r"/permalink", permalink.PermalinkHandler),
            (r"/service", handlers.ServiceHandler),
            (r"/tos.html", handlers.TOSHandler),
            ] + handlers.KernelRouter.urls
        handlers_list = [[baseurl+i[0]]+list(i[1:]) for i in handlers_list]
        settings = dict(
            compress_response = True,
            template_path = os.path.join(
                os.path.dirname(os.path.abspath(__file__)), "templates"),
            static_path = os.path.join(
                os.path.dirname(os.path.abspath(__file__)), "static"),
            static_url_prefix = baseurl + "/static/",
            static_handler_class = handlers.StaticHandler
            )
        self.kernel_dealer = KernelDealer(config.get("provider_settings"))
        start_providers(self.kernel_dealer.port, config.get("providers"), dir)
        self.completer = handlers.Completer(self.kernel_dealer)
        db = __import__('db_' + config.get('db'))
        self.db = db.DB(config.get('db_config')['uri'])
        self.ioloop = zmq.eventloop.IOLoop.instance()

        # to check for blocking when debugging, uncomment the following
        # and set the argument to the blocking timeout in seconds
        self.ioloop.set_blocking_log_threshold(.5)
        super(SageCellServer, self).__init__(handlers_list, **settings)
        logger.info('SageCell server started')
        try:
            from systemd.daemon import notify
            logger.debug('notifying systemd that we are ready')
            notify('READY=1\nMAINPID={}'.format(os.getpid()), True)
        except ImportError:
            pass
Example #39
    def main(cls, args):
        if len(args) != 1:
            print("config file argument required")
            return 1

        exe = None
        try:
            exe = cls(config_path=args[0])
            daemon.notify("READY=1")
            return exe.run()
        except ServiceDaemonError as ex:
            logging.fatal("%s failed to start: %s", cls.__name__, ex)
            return 1
        finally:
            if exe:
                exe.cleanup()
Example #40
    def reload_config(self):
        file_ctime = None
        try:
            file_ctime = os.path.getctime(self.config_path)
        except FileNotFoundError:
            if self.require_config:
                raise ServiceDaemonError("Cannot start without json config file at {!r}".format(self.config_path))

        if file_ctime != self.config_file_ctime:
            daemon.notify("RELOADING=1")
            self.log.info("%sloading configuration", "re" if self.config_file_ctime else "")
            self.config_file_ctime = file_ctime
            self.config = read_file(self.config_path, json=True)
            self.log.info("new config: %r", self.config)
            self.log.setLevel(self.config.get("log_level", logging.INFO))
            self.handle_new_config()
            daemon.notify("READY=1")
Example #41
		def wait_iter():
			if not wait_iter.sd_ready:
				daemon.notify('READY=1')
				daemon.notify('STATUS=Running in {} mode...'.format(opts.call))
				wait_iter.sd_ready = True
			time.sleep(wait_iter.timeout)
			if wait_iter.sd_wdt: daemon.notify('WATCHDOG=1')
Example #42
    def sd_cycle(ts=None):
        if not sd_cycle.ready:
            daemon.notify('READY=1')
            daemon.notify('STATUS=Running...')
            sd_cycle.ready = True
        if sd_cycle.delay:
            if ts is None: ts = mono_time()
            delay = ts - sd_cycle.ts_next
            if delay > 0: time.sleep(delay)
            sd_cycle.ts_next += sd_cycle.delay
        else: sd_cycle.ts_next = None
        if sd_cycle.wdt: daemon.notify('WATCHDOG=1')
Example #43
    def _receive(self):
        rebind = False
        receive = True
        while receive:
            log.debug("Waiting for a new message")
            try:
                message = self._read_from_stream()
                log.debug("Received message ---> <{0}>".format(message))
            except Exception:
                log.error("Communication error")
                print(traceback.format_exc())
                message = None

            if notify:
                notify('WATCHDOG=1')

            if message is None:
                receive = False
                log.warning("No new message received")
            elif message == PROBE_CMD:
                # Skipping the PROBE message, keep on receiving messages.
                log.debug("PROBE message")
            elif message.startswith(ERROR_CMD):
                # Terminate the receiving loop on ERROR message
                receive = False
                log.error("ERROR")
            elif message.startswith(LOOP_CMD):
                # Terminate the receiving loop on LOOP message.
                # A complete implementation should proceed with
                # a rebind of the session.
                log.debug("LOOP")
                receive = False
                rebind = True
            elif message.startswith(SYNC_ERROR_CMD):
                # Terminate the receiving loop on SYNC ERROR message.
                # A complete implementation should create a new session
                # and re-subscribe to all the old items and relative fields.
                log.error("SYNC ERROR")
                receive = False
            elif message.startswith(END_CMD):
                # Terminate the receiving loop on END message.
                # The session has been forcibly closed on the server side.
                # A complete implementation should handle the
                # "cause_code" if present.
                log.info("Connection closed by the server")
                receive = False
            elif message.startswith("Preamble"):
                # Skipping Preamble message, keep on receiving messages.
                log.debug("Preamble")
            else:
                self._forward_update_message(message)

        if not rebind:
            log.debug("Closing connection")
            # Close the stream connection, then clear internal data structures
            # for session and subscriptions management.
            self._stream_connection.close()
            self._stream_connection = None
            self._session.clear()
            self._subscriptions.clear()
            self._current_subscription_key = 0
        else:
            log.debug("Binding to this active session")
            self._stream_connection = None
            self.bind()
Example #44
    def systemd_status(message):
        """Send a status `message` to systemd.

        :type message: str
        """
        notify("STATUS={message}".format(message=message))
Example #45
    def sigterm(self, signum, frame):  # pylint: disable=unused-argument
        self.log.info("Received SIG%s, stopping daemon...", "TERM" if (signum == signal.SIGTERM) else "INT")
        daemon.notify("STOPPING=1")
        self.running = False
Example #46
    def ping_watchdog(self):
        """Let systemd know we are still alive and well"""
        daemon.notify("WATCHDOG=1")
Example #47
	def watchdog(self):
		notify("WATCHDOG=1")
		self.watchdog_handle = self.loop.call_later(self.timeout, self.watchdog)
Example #48
	def subsystem_started(self, subsystem):
		if subsystem in self.subsystems:
			self.subsystems.remove(subsystem)
			if self.subsystems == set():
				notify("READY=1")
Example #49
	def run(self,ctx,**k):
		event = self.params(ctx)
		if not len(event):
			raise SyntaxError("No parameters make no sense")

		sd.notify("STATUS="+' '.join(event))
Example #50
def main(args=None):
	import argparse
	parser = argparse.ArgumentParser(
		usage='%(prog)s [options] [ [--] arguments ]', # argparse fails to build that for $REASONS
		description='Tool to generate and keep tinydns'
			' zone file with dynamic dns entries for remote hosts.')

	parser.add_argument('zone_file', nargs='?',
		help='Path to tinydns zone file with client Ed25519 (base64-encoded)'
				' pubkeys and timestamps in comments before entries.'
			' Basically any line with IPs that has comment in the form of'
				' "dynamic: <ts> <pubkey> <pubkey2> ..." immediately before it (no empty lines'
				' or other comments separating these) can be updated by packet with'
				' proper ts/signature.')

	parser.add_argument('update_command', nargs='*',
		help='Optional command to run on zone file updates and its arguments (if any).'
			' If --uid is specified, all commands will be run after dropping privileges.'
			' Use "--" before it to make sure that none of its args get interpreted by this script.')

	parser.add_argument('-g', '--genkey', action='store_true',
		help='Generate a new random signing/verify'
			' Ed25519 keypair, print both keys to stdout and exit.')

	parser.add_argument('-b', '--bind',
		metavar='[host:]port', default=bytes(default_port),
		help='Host/port to bind listening socket to (default: %(default)s).')
	parser.add_argument('-v', '--ip-af',
		metavar='{ 4 | 6 }', choices=('4', '6'), default=socket.AF_UNSPEC,
		help='Resolve hostname(s) (if any) using specified address family version.'
			' Either "4" or "6", no restriction is appled by default.')

	parser.add_argument('--systemd', action='store_true',
		help='Receive socket fd from systemd, send systemd startup notification.'
			' This allows for systemd socket activation and running the'
				' app from unprivileged uid right from systemd service file.'
			' Requires systemd python bindings.')
	parser.add_argument('-u', '--uid', metavar='uid[:gid]',
		help='User (name) or uid (and optional group/gid) to run as after opening socket.'
			' WARNING: not a proper daemonization - does not do setsid, chdir,'
				' closes fds, sets optional gids, etc - just setresuid/setresgid. Use systemd for that.')

	parser.add_argument('--update-timestamps', action='store_true',
		help='Usually, when no addresses are changed, zone file does not get updated.'
			' This option forces updates to timestamps in addr-block headers.')

	parser.add_argument('-d', '--debug', action='store_true', help='Verbose operation mode.')
	opts = parser.parse_args(sys.argv[1:] if args is None else args)

	global log
	import logging
	logging.basicConfig(level=logging.DEBUG if opts.debug else logging.WARNING)
	log = logging.getLogger()

	if opts.genkey:
		signing_key = key_generate()
		print('Signing key (for client only):\n  ', key_encode(signing_key), '\n')
		print('Verify key (for this script):\n  ', key_encode(key_get_vk(signing_key)), '\n')
		return

	if not opts.zone_file: parser.error('Zone file path must be specified')

	if isinstance(opts.ip_af, types.StringTypes):
		opts.ip_af = {'4': socket.AF_INET, '6': socket.AF_INET6}[opts.ip_af]

	try: host, port = opts.bind.rsplit(':', 1)
	except ValueError: host, port = default_bind, opts.bind
	socktype, port = socket.SOCK_DGRAM, int(port)
	af, addr = get_socket_info(host, port, family=opts.ip_af, socktype=socktype)

	sock = None
	if opts.systemd:
		from systemd import daemon
		try: sock, = daemon.listen_fds()
		except ValueError as err:
			log.info('Unable to get socket from systemd, will create it manually')
		else: sock = socket.fromfd(sock, af, socktype)
		daemon.notify('READY=1')
		daemon.notify('STATUS=Listening for update packets')
	if not sock:
		log.debug('Binding to: %r (port: %s, af: %s, socktype: %s)', addr, port, af, socktype)
		sock = socket.socket(af, socktype)
		sock.bind((addr, port))
	if opts.uid: drop_privileges(opts.uid)

	with open(opts.zone_file, 'rb'): pass # access check
	zone_update_loop(
		opts.zone_file, sock, opts.update_command,
		force_updates=opts.update_timestamps )
Example #51
def notify_systemd(status):
    if daemon:
        daemon.notify(status)
Example #52
#!/usr/bin/python2
# Written by Capane.us
# Modified by Han

import os, collections, signal, sys, subprocess, socket
import triforcetools
from systemd import daemon
from Adafruit_CharLCDPlate import Adafruit_CharLCDPlate
from time import sleep
from subprocess import call
from ConfigParser import SafeConfigParser

# We are up, so tell systemd
daemon.notify("READY=1")

# Define restart program
def restart_program():
    # Restarts the current program.
    # Note: this function does not return. Any cleanup action (like
    # saving data) must be done before calling this function.
    python = sys.executable
    os.execl(python, python, *sys.argv)

# Define a signal handler to turn off LCD before shutting down
def handler(signum = None, frame = None):
    lcd = Adafruit_CharLCDPlate()
    lcd.clear()
    lcd.stop()
    sys.exit(0)
signal.signal(signal.SIGTERM , handler)
Example #53
    def watchdog_ping(self) -> None:
        notify(Notification.WATCHDOG)
        thor.schedule(self.watchdog_freq, self.watchdog_ping)
Example #54
	def run(self,ctx,**k):
		event = self.params(ctx)
		if len(event):
			raise SyntaxError("No parameters here")

		sd.notify("READY=1")
Example #55
                            old.wait(10)
                        except psutil.TimeoutExpired:
                            old.kill()
                    except psutil.AccessDenied:
                        pass
            except psutil.NoSuchProcess:
                pass
        pidlock.break_lock()
        
    pidlock.acquire(timeout=10)
    app = PermalinkServer()
    app.listen(port=args.port, xheaders=True)

    def handler(signum, frame):
        logger.info("Received %s, shutting down...", signum)
        ioloop = tornado.ioloop.IOLoop.current()
        ioloop.add_callback_from_signal(ioloop.stop)
    
    signal.signal(signal.SIGHUP, handler)
    signal.signal(signal.SIGINT, handler)
    signal.signal(signal.SIGTERM, handler)

    try:
        from systemd.daemon import notify
        notify('READY=1\nMAINPID={}'.format(os.getpid()), True)
    except ImportError:
        pass
        
    tornado.ioloop.IOLoop.current().start()
    pidlock.release()
Example #56
    def serve(self):
        if sd is not None and sd.booted():
            sd.notify("READY=1")
        return self.httpd.serve_forever()
Example #57
    def systemd_ready():
        """Signal to systemd that the service has successfully started."""
        notify("READY=1")
Example #58
def main():
    parser = argparse.ArgumentParser(
            description='SOCKS (SOCKS Protocol Version 5) server with '
                    'support non-regular use cases via pluggable features',
            )
    
    parser.add_argument(
            '--use-fork',
            action='store_true',
            help='use fork operation (after sockets creation)',
            )
    parser.add_argument(
            '--pid-file',
            metavar='PID-FILE-PATH',
            help='path to pid file',
            )
    parser.add_argument(
            'config',
            metavar='CONFIG-PATH',
            help='path to config file',
            )
    
    args = parser.parse_args()
    
    config = config_import.config_import(args.config)
    
    features_str = config.get('plasticine-socks-server', 'features', fallback=None)
    unix = config.get('plasticine-socks-server', 'unix', fallback=None)
    ip = config.get('plasticine-socks-server', 'ip', fallback=None)
    port = config.getint('plasticine-socks-server', 'port', fallback=None)
    
    if features_str:
        features = import_features(features_str, config, args.config)
    else:
        features = None
    
    socks_server_environ = {}
    
    socks_server.socks_server_preinit(
            socks_server_environ,
            features=features,
            )
    
    socks_server.socks_server_create_socks_sock(
            socks_server_environ,
            unix=unix,
            ip=ip,
            port=port,
            )
    
    socks_server.socks_server_before_fork(socks_server_environ)
    
    if args.use_fork:
        pid = os.fork()
        if pid:
            # XXX if fork is used, then the *parent* must write the child's pid
            if args.pid_file is not None:
                write_pid(args.pid_file, pid)
            
            os._exit(0)
    elif args.pid_file is not None:
        write_pid(args.pid_file, os.getpid())
    
    if daemon is not None:
        daemon.notify(
                'READY=1\nMAINPID={}'.format(os.getpid()),
                unset_environment=True,
                )
    
    socks_server.socks_server_after_fork(socks_server_environ)
    
    loop = asyncio.get_event_loop()
    
    def shutdown_handler():
        # XXX shutdown may be executed before init has run (or completed)
        asyncio.ensure_future(
                socks_server.socks_server_shutdown(socks_server_environ, loop),
                loop=loop,
                )
    try:
        loop.add_signal_handler(signal.SIGINT, shutdown_handler)
    except NotImplementedError:
        pass
    try:
        loop.add_signal_handler(signal.SIGTERM, shutdown_handler)
    except NotImplementedError:
        pass
    
    init_future = asyncio.ensure_future(
            socks_server.socks_server_init(socks_server_environ, loop),
            loop=loop,
            )
    
    loop.run_until_complete(init_future)
    
    serve_future = asyncio.ensure_future(
            socks_server.socks_server_serve(socks_server_environ),
            loop=loop,
            )
    
    loop.run_until_complete(serve_future)
    
    try:
        loop.remove_signal_handler(signal.SIGINT)
    except NotImplementedError:
        pass
    try:
        loop.remove_signal_handler(signal.SIGTERM)
    except NotImplementedError:
        pass
Example #59
def test_notify_no_socket():
    assert notify('READY=1') == False
    assert notify('FDSTORE=1', fds=[]) == False
    assert notify('FDSTORE=1', fds=[1,2]) == False
    assert notify('FDSTORE=1', pid=os.getpid()) == False
    assert notify('FDSTORE=1', pid=os.getpid(), fds=(1,)) == False
Example #60
def notify(*args, **kwargs):
    return sd.notify(*args, **kwargs)
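Taken together, the examples above use four message types: READY=1, STATUS=..., WATCHDOG=1 and STOPPING=1. A minimal self-contained sketch of a main loop that combines them, following the WATCHDOG_USEC convention from Examples #5 and #7 (all names here are illustrative, not taken from any project above):

import os
import time
from systemd import daemon

def serve_forever(do_work, poll_interval=1.0):
    """Announce readiness, ping the watchdog while working, announce shutdown."""
    daemon.notify("READY=1")
    daemon.notify("STATUS=Entering main loop")
    # Ping at half the configured watchdog timeout, if the unit sets one.
    wd_usec = float(os.environ.get("WATCHDOG_USEC", 0))
    wd_interval = wd_usec / 2e6 if wd_usec else None
    last_ping = time.monotonic()
    try:
        while True:
            do_work()
            if wd_interval and time.monotonic() - last_ping >= wd_interval:
                daemon.notify("WATCHDOG=1")
                last_ping = time.monotonic()
            time.sleep(poll_interval)
    finally:
        daemon.notify("STOPPING=1")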