Example #1
    def test_command_stop_tap(self):
        """Test stop tap command"""
        args = CliArgs(target='target_one', tap='tap_one')
        pipelinewise = PipelineWise(args, CONFIG_DIR, VIRTUALENVS_DIR)

        # Tap is not running and the pid file does not exist, so it should exit with an error
        with pytest.raises(SystemExit) as pytest_wrapped_e:
            pipelinewise.stop_tap()
        assert pytest_wrapped_e.type == SystemExit
        assert pytest_wrapped_e.value.code == 1

        # The stop_tap command should stop all of the child processes.
        # 1. Start the pipelinewise mock executable, which runs
        #    Linux-piped dummy tap and target connectors
        with pidfile.PIDFile(pipelinewise.tap['files']['pidfile']):
            os.spawnl(os.P_NOWAIT,
                      f'{RESOURCES_DIR}/test_stop_tap/scheduler-mock.sh',
                      'test_stop_tap/scheduler-mock.sh')
            # Wait 5 seconds to make sure the dummy tap is running
            time.sleep(5)

            # Send the stop_tap command
            with pytest.raises(SystemExit):
                pipelinewise.stop_tap()

        # Should not leave any remaining PipelineWise-related Linux processes
        for proc in psutil.process_iter(['cmdline']):
            full_command = ' '.join(proc.info['cmdline'])
            assert re.match('scheduler|pipelinewise|tap|target',
                            full_command) is None
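For reference, here is a minimal sketch of the enter/exit behaviour the assertions above rely on, assuming pidfile is the python-pidfile package (creates the file on entry, raises AlreadyRunningError while it is held, removes it on a clean exit); the demo path is invented:

import os

import pidfile

PID_PATH = '/tmp/pidfile-demo.pid'  # hypothetical path, just for the demo

with pidfile.PIDFile(PID_PATH):
    # inside the context the file exists and records our PID
    assert os.path.exists(PID_PATH)
# after a clean exit the file is gone again
assert not os.path.exists(PID_PATH)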
Example #2
def entrypoint():
    """
    Entry Point.

    Check for PID & RPC.

    If an overlay is already running, pass the args along and close.

    Otherwise start up the overlay!
    """
    config_dir = os.path.join(xdg_config_home, "discover_overlay")
    os.makedirs(config_dir, exist_ok=True)
    line = ""
    for arg in sys.argv[1:]:
        line = "%s %s" % (line, arg)
    pid_file = os.path.join(config_dir, "discover_overlay.pid")
    rpc_file = os.path.join(config_dir, "discover_overlay.rpc")
    try:
        with pidfile.PIDFile(pid_file):
            logging.getLogger().setLevel(logging.INFO)
            Discover(rpc_file, line)
    except pidfile.AlreadyRunningError:
        logging.warning("Discover overlay is currently running")

        with open(rpc_file, "w") as tfile:
            tfile.write(line)
            logging.warning("Sent RPC command")
Example #3
 def test_pidfile_exists_process_running(self, exists_mock, Process_mock,
                                         pid_exists_mock):
     exists_mock.return_value = True
     pid_exists_mock.return_value = True
     Process_mock.return_value = psutil.Process(os.getpid())
     with self.assertRaises(pidfile.AlreadyRunningError):
         with pidfile.PIDFile():
             assert True
Example #4
def entrypoint():
    """
    Entry Point.

    Check for PID & RPC.

    If an overlay is already running, pass the args along and close.

    Otherwise start up the overlay!
    """

    config_dir = os.path.join(xdg_config_home, "discover_overlay")
    os.makedirs(config_dir, exist_ok=True)
    line = ""
    for arg in sys.argv[1:]:
        line = "%s %s" % (line, arg)

    pid_file = os.path.join(config_dir, "discover_overlay.pid")
    rpc_file = os.path.join(config_dir, "discover_overlay.rpc")
    debug_file = os.path.join(config_dir, "output.txt")
    logging.getLogger().setLevel(logging.WARNING)
    FORMAT = "%(levelname)s - %(name)s - %(message)s"
    if "--debug" in sys.argv or "-v" in sys.argv:
        logging.getLogger().setLevel(logging.DEBUG)
        logging.basicConfig(filename=debug_file, format=FORMAT)
    else:
        logging.basicConfig(format=FORMAT)
    log = logging.getLogger(__name__)

    # Flatpak compat mode
    try:
        if "container" in os.environ and os.environ["container"] == "flatpak":
            if "--rpc" in sys.argv:
                with open(rpc_file, "w") as tfile:
                    tfile.write(line)
                    log.warning("Sent RPC command")
            else:
                log.info("Flatpak compat mode started")
                with open(rpc_file, "w") as tfile:
                    tfile.write("--close")
                Discover(rpc_file, debug_file, sys.argv[1:])
            return

        # Normal usage

        try:
            with pidfile.PIDFile(pid_file):
                Discover(rpc_file, debug_file, sys.argv[1:])
        except pidfile.AlreadyRunningError:
            log.warning("Discover overlay is currently running")

            with open(rpc_file, "w") as tfile:
                tfile.write(line)
                log.warning("Sent RPC command")
    except Exception as ex:
        log.error(ex)
        log.error(traceback.format_exc())
        sys.exit(1)
Example #5
def main(wg_iface,
         base_url=None,
         config_file=None,
         verbose=False,
         dryrun=False):
    """
    Fetches data from Encrypt.me, parses local WireGuard interface
    configuration information and ensures all peers are configured correctly
    based on any changes.
    """
    # get the config data from Encrypt.me and from what is on the server now
    # then, using wg interface data we can decide:
    with pidfile.PIDFile('/tmp/refresh-wireguard.pid'):
        if dryrun:
            rem("*** DRY RUN (no changes will be made) ***")
        eme_resp_data, eme_conf = fetch_eme_conf(base_url,
                                                 config_file,
                                                 verbose=verbose)
        if verbose:
            rem("Found %d peers from Encrypt.me; saving to %s" %
                (len(eme_conf), PEERS_FILE))
        if not dryrun:
            with open(PEERS_FILE, 'w') as peers_file:
                peers_file.write(eme_resp_data)
        wg_conf = fetch_wg_conf(wg_iface, verbose=verbose)
        if verbose:
            rem("Found %d local WireGuard peers" % (len(wg_conf)))
        eme_pubkeys = frozenset(eme_conf.keys())
        wg_pubkeys = frozenset(wg_conf.keys())

        # --- we need to determine: ---
        # * which peers to remove
        pubkeys_old = wg_pubkeys - eme_pubkeys
        if verbose:
            rem("Removing %d old peers" % len(pubkeys_old))
        for pubkey in pubkeys_old:
            wg_down(wg_iface, wg_conf[pubkey], dryrun)

        # * which peers to possibly change the IP address of
        pubkeys_same = wg_pubkeys & eme_pubkeys
        changed = 0
        for pubkey in pubkeys_same:
            eme_ipv4 = eme_conf[pubkey]
            wg_ipv4 = wg_conf[pubkey].allowed_ips
            if eme_ipv4 != wg_ipv4:
                changed += 1
                wg_down(wg_iface, wg_conf[pubkey], dryrun, verbose)
                wg_up(wg_iface, pubkey, eme_conf[pubkey], dryrun, verbose)
        if verbose:
            rem("Changed %d peers to new IP addresses" % (changed))

        # * which peers to add
        pubkeys_new = eme_pubkeys - wg_pubkeys
        if verbose:
            rem("Adding %d new peers" % len(pubkeys_new))
        for pubkey in pubkeys_new:
            wg_up(wg_iface, pubkey, eme_conf[pubkey], dryrun, verbose)
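In this example the PID file mostly guards against overlapping runs when the refresh is triggered by cron or a systemd timer. A minimal sketch of that guard in isolation, with the message and the run_sync placeholder as assumptions:

import sys

import pidfile


def run_sync():
    pass  # placeholder for the peer-reconciliation work above


def guarded_refresh():
    try:
        with pidfile.PIDFile('/tmp/refresh-wireguard.pid'):
            run_sync()
    except pidfile.AlreadyRunningError:
        # a previous invocation is still syncing; skip this run
        print('refresh already in progress, skipping', file=sys.stderr)
        sys.exit(0)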
Example #6
def entrypoint():
    configDir = os.path.join(xdg_config_home, "discover_overlay")
    os.makedirs(configDir, exist_ok=True)
    line = ""
    for arg in sys.argv[1:]:
        line = "%s %s" % (line, arg)
    pid_file = os.path.join(configDir, "discover_overlay.pid")
    rpc_file = os.path.join(configDir, "discover_overlay.rpc")
    try:
        with pidfile.PIDFile(pid_file):
            logging.getLogger().setLevel(logging.INFO)
            Discover(rpc_file, line)
    except pidfile.AlreadyRunningError:
        logging.warning("Discover overlay is currently running")

        with open(rpc_file, "w") as tfile:
            tfile.write(line)
            logging.warning("Sent RPC command")
Example #7
def main():
    print('Starting process')
    try:
        # Keep all of the daemon's work inside the context manager so the
        # PID file is held for the process's whole lifetime; exiting the
        # with block releases the lock immediately.
        with pidfile.PIDFile():
            print('Process started')

            # Initialize Stuff
            print('initializing hume daemon')
            humed = Humed()

            # TODO: Tell systemd we are ready
            # systemd.daemon.notify('READY=1')

            print('Ready. serving...')
            humed.run()
    except pidfile.AlreadyRunningError:
        print('Already running.')
        print('Exiting')
        sys.exit(1)
Example #8
from libs.core.conf import conf
from libs.core.azura import Azura

import atexit
import pidfile
import sys

if __name__ == "__main__":
    try:
        with pidfile.PIDFile(filename="azura.pid"):
            azura = Azura()
            atexit.register(azura.deconstruct)
            azura.run()
    except pidfile.AlreadyRunningError:
        conf.logger.log("Bot is already running.", type="ERRR")
        sys.exit(1)
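Note the shutdown ordering this arrangement implies: when run() returns, the with block releases the PID file first, and the atexit handler fires later, during interpreter shutdown. A minimal sketch of that ordering with invented names:

import atexit

import pidfile


def deconstruct():
    print('cleanup: runs after the PID file has already been released')


with pidfile.PIDFile(filename='demo.pid'):
    atexit.register(deconstruct)
    print('running...')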
Example #9
    channel_video_uri = response.json()['uri'] + video_uri

    # See if the video is already in the Services channel
    response = client.get(channel_video_uri, params={'fields': 'uri'})
    if response.status_code == 200:
        LOG.info("Video is already in the Services channel")
    else:
        LOG.info("Adding video to the Services channel")
        if not cmdargs.dry_run:
            response = client.put(channel_video_uri)
            response.raise_for_status()

        LOG.info('Video added to Services channel')

    message = "Wrapup complete"
    LOG.info(message)
    if not cmdargs.batch:
        try:
            zenity("--info", "--no-wrap", "--text", message, "--timeout", "60")
        except Exception:
            # Ignore any exception thrown due to timeout
            pass

if __name__ == "__main__":

    try:
        with pidfile.PIDFile('wrapup.pid'):
            main()
    except pidfile.AlreadyRunningError:
        LOG.error("Already running.  Exiting.")
Example #10
    r = requests.get(url)
    if r.status_code == 200:
        jResult = r.json()
        # TODO check for value
    else:
        # TODO: should this be handled?
        print("sending to domoticz failed")

    return


if __name__ == '__main__':

    print('Starting up ...')
    try:
        with pidfile.PIDFile(PIDFILE):
            #print('Process started')
            # time.sleep(30)
            bus = SystemBus()
            loop = GLib.MainLoop()
            signal = bus.get('org.asamk.Signal')
            signal.onMessageReceived = msgRcv
            print('Startup complete')
            # Tell systemd that our service is ready
            notify(Notification.READY)

            loop.run()

    except pidfile.AlreadyRunningError:
        print('Already running.')
Example #11
        #print ("mmmap file initialized")
        #fileStat()
        db.Stat.update_one(
            {},
            {'$set': {
                'scheduler.runningTasks': 0,
                'scheduler.scheduledTasks': 0,
                'scheduler.load': 0,
                'scheduler.processedTasks': 0
            }},
            upsert=True)

    pool = multiprocessing.Pool(processes=poolsize, initializer=procInit)
    atexit.register(stop, pool)
    try:
        with pidfile.PIDFile(filename='mupifDB_scheduler_pidfile'):
            log.info("Starting MupifDB Workflow Scheduler\n")

            try:

                # first, import executions that are already scheduled
                log.info("Importing already scheduled executions")
                for wed in db.WorkflowExecutions.find({"Status": 'Scheduled'}):
                    # add the corresponding weid to the pool, change status to scheduled
                    weid = wed['_id']
                    req = pool.apply_async(executeWorkflow,
                                           args=(weid, ),
                                           callback=procFinish,
                                           error_callback=procError)
                    log.info("WEID %s added to the execution pool" % (weid))
Example #12
def main(argv):
    PROG = Path(argv[0]).name

    logger = logging.getLogger(PROG)
    fh = logging.FileHandler(f"{PROG}.log")
    if os.environ.get("_PBENCH_UNIT_TESTS"):
        fmtstr = "%(levelname)s %(name)s %(funcName)s -- %(message)s"
    else:
        fmtstr = (
            "%(asctime)s %(levelname)s %(process)s %(thread)s"
            " %(name)s %(funcName)s %(lineno)d -- %(message)s"
        )
    fhf = logging.Formatter(fmtstr)
    fh.setFormatter(fhf)
    if os.environ.get("_PBENCH_TOOL_DATA_SINK_LOG_LEVEL") == "debug":
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    fh.setLevel(log_level)
    logger.addHandler(fh)
    logger.setLevel(log_level)

    try:
        redis_host = argv[1]
        redis_port = argv[2]
        param_key = argv[3]
    except IndexError as e:
        logger.error("Invalid arguments: %s", e)
        return 1

    global tar_path
    tar_path = find_executable("tar")
    if tar_path is None:
        logger.error("External 'tar' executable not found")
        return 2

    try:
        redis_server = redis.Redis(host=redis_host, port=redis_port, db=0)
    except Exception as e:
        logger.error(
            "Unable to connect to redis server, %s:%s: %s", redis_host, redis_port, e
        )
        return 3

    try:
        params_raw = redis_server.get(param_key)
        if params_raw is None:
            logger.error('Parameter key, "%s" does not exist.', param_key)
            return 4
        logger.debug("params_key (%s): %r", param_key, params_raw)
        params_str = params_raw.decode("utf-8")
        # The expected parameters for this "data-sink" name the "channel" to
        # subscribe to for the tool meister operational life-cycle.  The
        # data-sink listens for the state transitions, start | stop | send |
        # terminate, exiting when "terminate" is received, and marks the
        # state in which data is captured.
        #
        # E.g. params = '{ "channel": "run-chan",
        #                  "benchmark_run_dir": "/loo/goo" }'
        params = json.loads(params_str)
        channel = params["channel"]
        benchmark_run_dir = Path(params["benchmark_run_dir"]).resolve(strict=True)
        tool_group = params["group"]
    except Exception as ex:
        logger.error("Unable to fetch and decode parameter key, %s: %s", param_key, ex)
        return 5
    else:
        if not benchmark_run_dir.is_dir():
            logger.error(
                "Run directory argument, %s, must be a real directory.",
                benchmark_run_dir,
            )
            return 6
        logger.debug("Tool Data Sink parameters check out, daemonizing ...")
        redis_server.connection_pool.disconnect()
        del redis_server

    # Before we daemonize, flush any data written to stdout or stderr.
    sys.stderr.flush()
    sys.stdout.flush()

    pidfile_name = f"{PROG}.pid"
    pfctx = pidfile.PIDFile(pidfile_name)
    with open(f"{PROG}.out", "w") as sofp, open(
        f"{PROG}.err", "w"
    ) as sefp, daemon.DaemonContext(
        stdout=sofp,
        stderr=sefp,
        working_directory=os.getcwd(),
        umask=0o022,
        pidfile=pfctx,
        files_preserve=[fh.stream.fileno()],
    ):
        try:
            # We have to re-open the connection to the redis server now that we
            # are "daemonized".
            logger.debug("constructing Redis() object")
            try:
                redis_server = redis.Redis(host=redis_host, port=redis_port, db=0)

            except Exception as e:
                logger.error(
                    "Unable to connect to redis server, %s:%s: %s",
                    redis_host,
                    redis_port,
                    e,
                )
                return 7
            else:
                logger.debug("constructed Redis() object")

            tds_app = ToolDataSink(
                redis_server, channel, benchmark_run_dir, tool_group, logger
            )
            tds_app.execute()
        except OSError as exc:
            if exc.errno == errno.EADDRINUSE:
                logger.error(
                    "ERROR - tool data sink failed to start, 0.0.0.0:8080 already in use"
                )
            else:
                logger.exception("ERROR - failed to start the tool data sink")
        except Exception:
            logger.exception("ERROR - failed to start the tool data sink")
        finally:
            logger.info("Remove pid file ... (%s)", pidfile_name)
            try:
                os.unlink(pidfile_name)
            except Exception:
                logger.exception("Failed to remove pid file %s", pidfile_name)

    return 0
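One detail worth noting in this example: daemon.DaemonContext accepts a context manager as its pidfile argument and enters it only after daemonizing, which is why a pidfile.PIDFile instance can be passed where a lockfile object would traditionally go. A minimal sketch of just that interplay, with the path invented:

import daemon  # python-daemon
import pidfile


def serve():
    print('daemonized, PID file held')


with daemon.DaemonContext(pidfile=pidfile.PIDFile('/tmp/sketch.pid')):
    # entered only in the daemonized child, with the PID file written
    serve()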
Example #13
#!/usr/bin/env python
"""
The Scoutix IRC bot
"""
import json
import sys

import pidfile

from bot import Bot
from config import Config


try:
    with open('etc/config.json') as config_fp:
        config = Config(json.load(config_fp))
except OSError as ex:
    print(f"Unable to read the config file.\n{type(ex).__name__}: {ex}")
    sys.exit(1)

with pidfile.PIDFile('scoutix.pid'):
    CLIENT = Bot(config.nick, sasl_username=config.sasl_username,
                 sasl_password=config.sasl_password)
    CLIENT.module_config = config.modules
    CLIENT.run(config.server, port=config.port, channels=config.channels,
               tls=config.tls, tls_verify=config.tls_verify)
Example #14
 def serve_forever(self, poll_interval=0.5):
     logger.info(f'LAOBot Server: v{__version__}')
     with pidfile.PIDFile(constants.PIDFILE):
         super().serve_forever(poll_interval=poll_interval)
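The override acquires the PID file immediately before blocking, so the lock is held for exactly the server's lifetime. A self-contained sketch of the same idea on top of socketserver, with the handler, address, and path invented for the demo:

import socketserver

import pidfile


class EchoHandler(socketserver.StreamRequestHandler):
    def handle(self):
        # echo one line back to the client
        self.wfile.write(self.rfile.readline())


class LockedServer(socketserver.TCPServer):
    def serve_forever(self, poll_interval=0.5):
        # hold the PID file for as long as we are serving
        with pidfile.PIDFile('/tmp/echo-server.pid'):
            super().serve_forever(poll_interval=poll_interval)


# usage: LockedServer(('127.0.0.1', 9999), EchoHandler).serve_forever()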
                            + "|\n"
                        )
            except Exception as details:
                log.error("Wonky line in dashboard " + str(type(details)) + " " + str(details))

    dashboard += "O========================================^=========^=============^=============^=============^=============^=============^======================================================O\n"
    # main_dashboard.refresh()
    # main_dashboard.addstr(1,0,dashboard)
    print(dashboard)
    schedul.enter(settings["redraw_dash_period"], 1, print_panel)


log.info("Starting...")
# Make/check PIDFILE to prevent duplicates.
try:
    with pidfile.PIDFile():
        try:
            settings = readmake_json("settings.json")
            module_list = readmake_json(settings["path_modulelist"])

            # Is correct user
            if os.environ["USER"] != settings["user"] and not os.environ.get("CHECKUSER", "").lower() == "false":
                log.error("Command should be run as '" + settings["user"] + "' as it owns licence files. ('export CHECKUSER=FALSE' to disable this check)")
                exit()

            # Clear
            open("run_as_admin.sh", "w").close()
            slurm_permissions = get_slurm_permssions()

            # An error will be thrown if reservation is updated without change.
            # Settings need to be fixed
Example #16
        logger.critical(f"Could not find config file '{config_path}'. Quitting.")
        sys.exit(1)
    else:
        configs = configparser.ConfigParser()
        configs.read(config_path)
    URL = configs["css"]["url"]
    CSS_ID = configs["css"]["id"]

    ## logging
    logging_directory = Path(configs["misc"]["logging_directory"])
    if logging_directory.exists() and logging_directory.is_dir():
        logger.add(
            Path.joinpath(logging_directory, "scraper.error.log"),
            rotation="1 week",
            retention="6 week",
            level="ERROR",
        )
        logger.add(
            Path.joinpath(logging_directory, "scraper.info.log"),
            rotation="1 week",
            retention="2 week",
            level="INFO",
        )
    # running the main loop
    try:
        with pidfile.PIDFile(configs["misc"]["pidfile"]):
            logger.info("Starting scraper")
            main(URL, CSS_ID, arguments["--headless"])
    except pidfile.AlreadyRunningError:
        logger.warning("Quitting! Scraper already running")
Example #17
def watch(ctx, img_dir, streams_dir):
    with pidfile.PIDFile(Config.pidfile):
        _watch(img_dir, streams_dir)
Example #18
 def test_pidfile_exists_read_fail(self, exists_mock, pid_exists_mock):
     exists_mock.return_value = True
     pid_exists_mock.return_value = True
     with pidfile.PIDFile():
         assert True
Example #19
 def test_pidfile_exists_process_not_running(self, exists_mock,
                                             pid_exists_mock):
     exists_mock.return_value = True
     pid_exists_mock.return_value = False
     with pidfile.PIDFile():
         assert True
Example #20
def main(argv):
    """Main program for the Tool Meister.

    This function is the simple driver for the tool meister behaviors,
    handling argument processing, logging setup, initial connection to
    Redis(), fetch and validation of operational parameters from Redis(), and
    then the daemonization of the ToolMeister operation.

    Arguments:  argv - a list of parameters

    Returns 0 on success, > 0 when an error occurs.

    """
    _prog = Path(argv[0])
    PROG = _prog.name
    pbench_bin = _prog.parent.parent.parent

    try:
        redis_host = argv[1]
        redis_port = argv[2]
        param_key = argv[3]
    except IndexError as e:
        print(f"Invalid arguments: {e}", file=sys.stderr)
        return 1

    global tar_path
    tar_path = find_executable("tar")
    if tar_path is None:
        print("External 'tar' executable not found.", file=sys.stderr)
        return 2

    logger = logging.getLogger(PROG)
    fh = logging.FileHandler(f"{param_key}.log")
    if os.environ.get("_PBENCH_UNIT_TESTS"):
        fmtstr = "%(levelname)s %(name)s %(funcName)s -- %(message)s"
    else:
        fmtstr = (
            "%(asctime)s %(levelname)s %(process)s %(thread)s"
            " %(name)s %(funcName)s %(lineno)d -- %(message)s"
        )
    fhf = logging.Formatter(fmtstr)
    fh.setFormatter(fhf)
    if os.environ.get("_PBENCH_TOOL_MEISTER_LOG_LEVEL") == "debug":
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    fh.setLevel(log_level)
    logger.addHandler(fh)
    logger.setLevel(log_level)

    try:
        redis_server = redis.Redis(host=redis_host, port=redis_port, db=0)
    except Exception as e:
        logger.error(
            "Unable to construct Redis client, %s:%s: %s", redis_host, redis_port, e
        )
        return 3

    try:
        params_raw = redis_server.get(param_key)
        if params_raw is None:
            logger.error('Parameter key, "%s" does not exist.', param_key)
            return 4
        logger.info("params_key (%s): %r", param_key, params_raw)
        params_str = params_raw.decode("utf-8")
        params = json.loads(params_str)
        # Validate the tool meister parameters without constructing an object
        # just yet, as we want to make sure we can talk to the redis server
        # before we go through the trouble of daemonizing below.
        ToolMeister.fetch_params(params)
    except Exception as exc:
        logger.error(
            "Unable to fetch and decode parameter key, '%s': %s", param_key, exc
        )
        return 5
    else:
        redis_server.connection_pool.disconnect()
        del redis_server

    # Before we daemonize, flush any data written to stdout or stderr.
    sys.stderr.flush()
    sys.stdout.flush()

    ret_val = 0
    pidfile_name = f"{param_key}.pid"
    pfctx = pidfile.PIDFile(pidfile_name)
    with open(f"{param_key}.out", "w") as sofp, open(
        f"{param_key}.err", "w"
    ) as sefp, daemon.DaemonContext(
        stdout=sofp,
        stderr=sefp,
        working_directory=os.getcwd(),
        umask=0o022,
        pidfile=pfctx,
        files_preserve=[fh.stream.fileno()],
    ):
        try:
            # Previously we validated the tool meister parameters, and in
            # doing so made sure we had proper access to the redis server.
            #
            # We can safely create the ToolMeister object now that we are
            # "daemonized".
            logger.debug("constructing Redis() object")
            try:
                # NOTE: we have to recreate the connection to the redis
                # service since all open file descriptors were closed as part
                # of the daemonizing process.
                redis_server = redis.Redis(host=redis_host, port=redis_port, db=0)
            except Exception as e:
                logger.error(
                    "Unable to connect to redis server, %s:%s: %s",
                    redis_host,
                    redis_port,
                    e,
                )
                return 6
            else:
                logger.debug("constructed Redis() object")

            # FIXME: we should establish signal handlers that do the following:
            #   a. handle graceful termination (TERM, INT, QUIT)
            #   b. log operational state (HUP maybe?)

            try:
                tm = ToolMeister(pbench_bin, params, redis_server, logger)
            except Exception:
                logger.exception(
                    "Unable to construct the ToolMeister object with params, %r",
                    params,
                )
                return 7

            terminate = False
            try:
                while not terminate:
                    try:
                        logger.debug("waiting ...")
                        action, data = tm.wait_for_command()
                        logger.debug("acting ... %r, %r", action, data)
                        failures = action(data)
                        if failures > 0:
                            logger.warning(
                                "%d failures encountered for action, %r,"
                                " on data, %r",
                                failures,
                                action,
                                data,
                            )
                    except Terminate:
                        logger.info("terminating")
                        terminate = True
            except Exception:
                logger.exception("Unexpected error encountered")
                ret_val = 8
            finally:
                tm.cleanup()
        finally:
            logger.info("Remove pid file ... (%s)", pidfile_name)
            try:
                os.unlink(pidfile_name)
            except Exception:
                logger.exception("Failed to remove pid file %s", pidfile_name)

    return ret_val
Example #21
 def test_pidfile_not_exists(self, exists_mock):
     exists_mock.return_value = False
     with pidfile.PIDFile():
         assert True
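The test fragments in Examples #3, #18, #19, and #21 are method bodies whose @patch decorators the excerpts cut off. A minimal sketch of how the last one is typically wired up; the patch target is an assumption (the real suite may patch the module's own references instead):

import unittest
from unittest import mock

import pidfile


class PIDFileTests(unittest.TestCase):
    @mock.patch('os.path.exists')
    def test_pidfile_not_exists(self, exists_mock):
        # no stale PID file on disk, so the context manager starts cleanly
        exists_mock.return_value = False
        with pidfile.PIDFile():
            assert True


if __name__ == '__main__':
    unittest.main()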