Example #1
def terminate_node(
    node_uid: str,
    pid: Optional[int] = None
):
    if not node_uid:
        raise ValueError()
    try:
        # A specific pid was supplied: terminate that process directly.
        if pid is not None:
            terminate_process(
                node_uid,
                pid,
                getenv(
                    constants.PROCESS_TERMINATION_TIMEOUT_ENVNAME,
                    float
                )
            )
        else:
            # No pid given: scan all processes and terminate any whose
            # environment carries this node's UID.
            for proc in psutil.process_iter(['environ', 'pid']):
                try:
                    env = proc.info['environ']
                    if env and constants.NODE_UID_ENVNAME in env:
                        if env[constants.NODE_UID_ENVNAME] == node_uid:
                            terminate_process(
                                node_uid,
                                proc.info['pid'],
                                getenv(
                                    constants.PROCESS_TERMINATION_TIMEOUT_ENVNAME,
                                    float
                                )
                            )
                except (psutil.NoSuchProcess, psutil.AccessDenied):
                    pass
    except Exception:
        logger.exception('error terminating node')
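
The else branch only finds workers whose environment carries the node UID, so they have to be launched with that variable set. A minimal sketch of such a launch (the worker module name is a placeholder):

import os
import subprocess

import parkit.constants as constants

def spawn_worker(node_uid: str) -> subprocess.Popen:
    # Tag the child process so the psutil scan in terminate_node can find it.
    env = dict(os.environ)
    env[constants.NODE_UID_ENVNAME] = node_uid
    # 'worker' is a placeholder module name.
    return subprocess.Popen(['python', '-m', 'worker'], env=env)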
Example #2
def set_pid_entry(
    self, *,
    pid: int = os.getpid(),
    process_uid: str = getenv(constants.PROCESS_UID_ENVNAME, str),
    node_uid = getenv(constants.NODE_UID_ENVNAME, str) \
    if envexists(constants.NODE_UID_ENVNAME) else None,
    cluster_uid = getenv(constants.CLUSTER_UID_ENVNAME, str) \
    if envexists(constants.CLUSTER_UID_ENVNAME) else None
):
    self[pid] = dict(create_time=psutil.Process(pid).create_time(),
                     process_uid=process_uid,
                     node_uid=node_uid,
                     cluster_uid=cluster_uid)
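
Note that all four defaults above (os.getpid() and the getenv calls) are evaluated once, when the method is defined, not on every call. A sketch of the more conventional pattern that resolves them at call time (illustrative, not parkit's actual implementation):

def set_pid_entry(
    self, *,
    pid: Optional[int] = None,
    process_uid: Optional[str] = None,
    node_uid: Optional[str] = None,
    cluster_uid: Optional[str] = None
):
    # Resolve defaults per call instead of at definition time.
    pid = os.getpid() if pid is None else pid
    if process_uid is None:
        process_uid = getenv(constants.PROCESS_UID_ENVNAME, str)
    if node_uid is None and envexists(constants.NODE_UID_ENVNAME):
        node_uid = getenv(constants.NODE_UID_ENVNAME, str)
    if cluster_uid is None and envexists(constants.CLUSTER_UID_ENVNAME):
        cluster_uid = getenv(constants.CLUSTER_UID_ENVNAME, str)
    self[pid] = dict(create_time=psutil.Process(pid).create_time(),
                     process_uid=process_uid,
                     node_uid=node_uid,
                     cluster_uid=cluster_uid)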
Example #3
def wait(*args):
    args = list(args)
    if not args:
        raise ValueError()
    if isinstance(args[-1], types.FunctionType):
        condition = args.pop()
    else:
        condition = lambda: True
    if not all(isinstance(arg, Entity) for arg in args):
        raise ValueError()
    if len(args) > 0:
        if [(arg.site_uuid, arg.namespace) for arg in args].count(
            (args[0].site_uuid, args[0].namespace)) != len(args):
            raise ValueError()
    versions = []
    for _ in polling_loop(
            getenv(constants.ADAPTER_POLLING_INTERVAL_ENVNAME, float)):
        if len(args) > 0:
            _, env, _, _, _, _ = get_environment_threadsafe(
                args[0].storage_path, args[0].namespace, create=False)
            with transaction_context(env, write=False):
                if not versions:
                    versions = [arg.version for arg in args]
                    if condition():
                        break
                else:
                    if any(versions[i] != arg.version
                           for i, arg in enumerate(args)):
                        if condition():
                            break
                        versions = [arg.version for arg in args]
        else:
            if condition():
                break
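
Stripped of the parkit entities and version bookkeeping, this is a poll-until-true loop. A generic standard-library version of the same idea (names and intervals are illustrative):

import time

def wait_until(condition, poll_interval: float = 0.5, timeout: float = 30.0) -> bool:
    # Re-check the condition on a fixed interval until it holds
    # or the timeout expires.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if condition():
            return True
        time.sleep(poll_interval)
    return False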
Example #4
def set_namespace_size(size: int, storage_path: str, namespace: str):
    _, env, _, _, _, _ = get_environment_threadsafe(storage_path,
                                                    namespace,
                                                    create=False)
    _, site_env, _, _, _, _ = get_environment_threadsafe(
        storage_path, constants.ROOT_NAMESPACE, create=False)
    file_lock = filelock.FileLock(
        getenv(constants.GLOBAL_FILE_LOCK_PATH_ENVNAME, str))
    with environment_lock:
        with file_lock:
            env.set_mapsize(size)
            txn = None
            try:
                txn = site_env.begin(write=True, buffers=False)
                assert txn.put(key=namespace.encode('utf-8'),
                               value=struct.pack('@N', size))
                txn.commit()
                mapsize[namespace] = size
            except BaseException as exc:
                if txn:
                    try:
                        txn.abort()
                    except lmdb.Error:
                        pass
                if isinstance(exc, lmdb.Error):
                    raise TransactionError() from exc
                raise exc
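
py-lmdb transactions also work as context managers (commit on normal exit, abort on exception), so the manual abort bookkeeping can usually be folded away. A sketch of that variant of the locked block above, with the same error translation:

    with environment_lock:
        with file_lock:
            env.set_mapsize(size)
            try:
                # Commits when the block exits normally, aborts if it raises.
                with site_env.begin(write=True, buffers=False) as txn:
                    assert txn.put(key=namespace.encode('utf-8'),
                                   value=struct.pack('@N', size))
                mapsize[namespace] = size
            except lmdb.Error as exc:
                raise TransactionError() from exc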
Example #5
def get_concurrency(*, site_uuid: Optional[str] = None) -> int:
    state = Dict(
        constants.CLUSTER_STATE_DICT_PATH, site_uuid = site_uuid,
        create = True, bind = True
    )
    _, env, _, _, _, _ = get_environment_threadsafe(
        state.storage_path, state.namespace, create = False
    )
    with transaction_context(env, write = True):
        if 'concurrency' not in state:
            state['concurrency'] = getenv(constants.CLUSTER_CONCURRENCY_ENVNAME, int)
    return state['concurrency']
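
Because the key is only written when it is missing, the concurrency value is latched on first use:

# The first call stores the value read from the environment...
n = get_concurrency()
# ...and later calls return the stored value, even if the
# environment variable changes afterwards.
assert get_concurrency() == n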
Example #6
def __init__(
    self,
    path: Optional[str] = constants.PIDTABLE_DICT_PATH,
    /, *,
    site_uuid: Optional[str] = \
    get_site_uuid(getenv(constants.GLOBAL_SITE_STORAGE_PATH_ENVNAME, str)),
Example #7
import logging

from typing import (Any, Optional, Union)

import psutil

import parkit.constants as constants
import parkit.storage.threadlocal as thread

from parkit.adapters.dict import Dict
from parkit.storage.context import transaction_context
from parkit.storage.site import (get_site_uuid, import_site)
from parkit.utility import (envexists, getenv)

logger = logging.getLogger(__name__)

import_site(getenv(constants.GLOBAL_SITE_STORAGE_PATH_ENVNAME, str),
            create=True)


class PidTable(Dict):

    def __init__(
        self,
        path: Optional[str] = constants.PIDTABLE_DICT_PATH,
        /, *,
        site_uuid: Optional[str] = \
        get_site_uuid(getenv(constants.GLOBAL_SITE_STORAGE_PATH_ENVNAME, str)),
        create: bool = True,
        bind: bool = True
    ):
Example #8
    parser.add_argument('--level')

    args = parser.parse_args()

    levels = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']

    if args.level is not None:
        levels = levels[levels.index(args.level):]

    with snapshot(syslog):
        version = syslog.version
        index = len(syslog)

    print('welcome to syslog')
    print('installation path:',
          getenv(constants.GLOBAL_SITE_STORAGE_PATH_ENVNAME, str))
    if syslog.maxsize is None:
        print('syslog length is unbounded')
    else:
        print('syslog holds a maximum of', syslog.maxsize, 'entries')
    while True:
        wait(syslog, lambda: syslog.version > version)
        with snapshot(syslog):
            n_new_entries = syslog.version - version
            if syslog.maxsize is None:
                for _ in range(n_new_entries):
                    record = syslog[index]
                    index += 1
                    if any(''.join([level, '@']) in record
                           for level in levels):
                        print(record)
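
The 'LEVEL@' membership test relies on the log format configured in Example #14 ('%(levelname)s@%(name)s : ...'), so each record carries its level as a marker. A small illustration of the filter (the record string is made up):

levels = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
selected = levels[levels.index('WARNING'):]   # ['WARNING', 'ERROR', 'CRITICAL']

record = '2021-01-01 12:00:00,000 ERROR@cluster.monitor : node restarted'
assert any(''.join([level, '@']) in record for level in selected)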
Example #9
                pass
        return nodes

    try:
        if priority_filter is not None:
            nodes = get_nodes()
            priority_nodes = [
                (node_uid, pid) for node_uid, pid in nodes
                if any(match in node_uid for match in priority_filter)
            ]
            for node_uid, pid in priority_nodes:
                terminate_process(
                    node_uid,
                    pid,
                    getenv(
                        constants.PROCESS_TERMINATION_TIMEOUT_ENVNAME,
                        float
                    )
                )
        threads = []

        class Terminator(threading.Thread):

            def __init__(self, node_uid, pid):
                super().__init__()
                self._node_uid = node_uid
                self._pid = pid

            def run(self):
                terminate_node(self._node_uid, self._pid)

        for node_uid, pid in get_nodes():
Example #10
def task() -> Optional[Task]:
    try:
        return pickle.loads(getenv(constants.SELF_ENVNAME, str).encode())
    except ValueError:
        return None
Example #11
import logging

import parkit.constants as constants

from parkit.storage.site import set_default_site
from parkit.system.pidtable import pidtable
from parkit.utility import (envexists, getenv)

logger = logging.getLogger(__name__)

if envexists(constants.DEFAULT_SITE_PATH_ENVNAME):
    set_default_site(getenv(constants.DEFAULT_SITE_PATH_ENVNAME, str),
                     create=True)

pidtable.set_pid_entry()
Example #12
import logging

import parkit.constants as constants

from parkit.adapters.scheduler import Scheduler
from parkit.exceptions import ObjectNotFoundError
from parkit.storage.namespace import Namespace
from parkit.storage.site import get_default_site
from parkit.utility import (getenv, polling_loop)

logger = logging.getLogger(__name__)

if __name__ == '__main__':

    try:

        node_uid = getenv(constants.NODE_UID_ENVNAME, str)
        cluster_uid = getenv(constants.CLUSTER_UID_ENVNAME, str)

        logger.info('scheduler (%s) started for site %s', node_uid,
                    get_default_site())

        for i in polling_loop(
                getenv(constants.SCHEDULER_HEARTBEAT_INTERVAL_ENVNAME, float)):
            for scheduler in Namespace(constants.SCHEDULER_NAMESPACE,
                                       create=True):
                if isinstance(scheduler, Scheduler):
                    try:
                        if scheduler.is_scheduled():
                            scheduler.asyncable(*scheduler.args,
                                                **scheduler.kwargs)
                    except ObjectNotFoundError:
Example #13
    terminate_node
)
from parkit.storage.site import get_default_site
from parkit.system.cluster import get_concurrency
from parkit.system.pidtable import pidtable
from parkit.utility import (
    getenv,
    polling_loop
)

logger = logging.getLogger(__name__)

if __name__ == '__main__':

    try:
        node_uid = getenv(constants.NODE_UID_ENVNAME, str)
        cluster_uid = getenv(constants.CLUSTER_UID_ENVNAME, str)

        logger.info('monitor (%s) started for site %s', node_uid, get_default_site())

        polling_interval = getenv(constants.MONITOR_POLLING_INTERVAL_ENVNAME, float)

        termination_queue = Queue(constants.NODE_TERMINATION_QUEUE_PATH, create = True)

        for i in polling_loop(polling_interval):

            try:

                #
                # Restart nodes if needed
                #
Example #14
import logging

from typing import Any

import parkit.constants as constants

from parkit.adapters.array import Array
from parkit.storage.site import (
    get_site_uuid,
    import_site
)
from parkit.utility import getenv

import_site(getenv(constants.GLOBAL_SITE_STORAGE_PATH_ENVNAME, str), create = True)

syslog: Array = Array(
    constants.SYSLOG_PATH,
    maxsize = getenv(constants.MAX_SYSLOG_ENTRIES_ENVNAME, int),
    site_uuid = get_site_uuid(getenv(constants.GLOBAL_SITE_STORAGE_PATH_ENVNAME, str)),
    create = True, bind = True
)

class LogHandler(logging.StreamHandler):

    def emit(self, record: Any):
        syslog.append(self.format(record))

logging.basicConfig(
    format = '%(asctime)s %(levelname)s@%(name)s : %(message)s',
    level = logging.INFO,
    handlers = [LogHandler()]
)
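
With this handler installed, anything logged through the standard logging machinery lands in the shared syslog Array as a formatted entry, for example:

log = logging.getLogger('cluster.monitor')
log.info('node restarted')

# The newest entry reads roughly
# '2021-01-01 12:00:00,000 INFO@cluster.monitor : node restarted'.
print(syslog[len(syslog) - 1])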
Example #15
if not envexists(constants.GLOBAL_FILE_LOCK_PATH_ENVNAME):
    path = os.path.abspath(os.path.join(
        tempfile.gettempdir(),
        constants.GLOBAL_FILE_LOCK_FILENAME
    ))
    setenv(constants.GLOBAL_FILE_LOCK_PATH_ENVNAME, path)

if not envexists(constants.GLOBAL_SITE_STORAGE_PATH_ENVNAME):
    path = os.path.abspath(os.path.join(
        tempfile.gettempdir(),
        constants.PARKIT_TEMP_SITE_DIRNAME
    ))
    setenv(constants.GLOBAL_SITE_STORAGE_PATH_ENVNAME, path)
else:
    path = os.path.abspath(getenv(constants.GLOBAL_SITE_STORAGE_PATH_ENVNAME, str))
    setenv(constants.GLOBAL_SITE_STORAGE_PATH_ENVNAME, path)

# Override LMDB profile defaults with matching environment variables,
# when they are set and pass the type check.
for profile_name in get_lmdb_profiles():
    for name, default in cast(dict, get_lmdb_profiles())[profile_name].copy().items():
        if envexists('_'.join([profile_name.upper(), name])):
            if checkenv('_'.join([profile_name.upper(), name]), type(default)):
                cast(dict, get_lmdb_profiles())[profile_name][name] = \
                getenv('_'.join([profile_name.upper(), name]), type(default))

if not envexists(constants.CLUSTER_CONCURRENCY_ENVNAME):
    setenv(constants.CLUSTER_CONCURRENCY_ENVNAME, str(constants.DEFAULT_CLUSTER_CONCURRENCY))

if not envexists(constants.MAX_SYSLOG_ENTRIES_ENVNAME):
    setenv(constants.MAX_SYSLOG_ENTRIES_ENVNAME, str(constants.DEFAULT_MAX_SYSLOG_ENTRIES))
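
The envexists/setenv pairs above implement a default-if-unset policy. For comparison, the same idea expressed directly against the process environment with the standard library (illustrative only; parkit's setenv may do more than set os.environ):

import os
import tempfile

# Fall back to a lock file in the temp directory unless the caller
# already exported a path.
os.environ.setdefault(
    constants.GLOBAL_FILE_LOCK_PATH_ENVNAME,
    os.path.abspath(os.path.join(tempfile.gettempdir(),
                                 constants.GLOBAL_FILE_LOCK_FILENAME))
)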