def configure(
    device: Any,
    device_name: Optional[str] = None,
    level: tango.LogLevel = tango.LogLevel.LOG_INFO,
    handlers: Optional[Iterable[logging.Handler]] = None,
) -> None:
    """Configure logging for a TANGO device.

    This modifies the logging behaviour of the device class.

    :param device: to configure.
    :param device_name: alternate device name. default: None
    :param level: tango level to log. default: INFO
    :param handlers iterable of extra log handlers to install
    """

    device_class = type(device)
    if device_name is None:
        device_name = device.get_name()

    # Monkey patch the tango device logging to redirect to python.
    TangoFilter.device_name = device_name
    device_class.debug_stream = TangoFilter.log_man.make_fn(logging.DEBUG)
    device_class.info_stream = TangoFilter.log_man.make_fn(logging.INFO)
    device_class.warn_stream = TangoFilter.log_man.make_fn(logging.WARNING)
    device_class.error_stream = TangoFilter.log_man.make_fn(logging.ERROR)
    device_class.fatal_stream = TangoFilter.log_man.make_fn(logging.CRITICAL)
    # device_class.get_logger = lambda self: get_logger()

    # Now initialise the logging.
    configure_logging(level=to_python_level(level), tags_filter=TangoFilter)
    log = get_logger()
    # Iterate over a copy: removing handlers from the list being
    # iterated over would skip entries.
    for handler in list(log.handlers):
        log.removeHandler(handler)

    # Copy into a mutable list so extra handlers can be appended below,
    # even if the caller passed a tuple or other iterable.
    handlers = [] if handlers is None else list(handlers)

    # If it's a real tango device, add a handler.
    if FEATURE_TANGO_LOGGER.is_active():
        if isinstance(device, Device):
            log.debug("Adding tango logging handler")
            handlers.append(TangoLoggingServiceHandler(device.get_logger()))
        else:
            cls = type(device)
            log.debug("Device %s is not a tango server device: %s", cls,
                      cls.mro())

    tango_filter = TangoFilter()
    for handler in handlers:
        log.debug("add handler %s", handler.__class__.__name__)
        handler.addFilter(tango_filter)
        handler.setFormatter(get_default_formatter(tags=True))
        log.addHandler(handler)

    log.debug("Configured logging for device %s", device_name)
def main(backend=None):
    """
    Start the processing controller.

    :param backend: config DB backend

    """
    configure_logging(level=LOG_LEVEL)

    # Register SIGTERM handler
    signal.signal(signal.SIGTERM, terminate)

    # Initialise processing controller
    proccontrol = ProcessingController()

    # Enter main loop
    proccontrol.main_loop(backend=backend)
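
The ``terminate`` function registered above is not defined in this snippet; a
minimal sketch of what such a handler might look like, assuming the process
should simply exit cleanly on SIGTERM:

import sys

def terminate(signum, frame):
    """Hypothetical SIGTERM handler: log and exit cleanly."""
    logging.getLogger(__name__).info("Asked to terminate")
    sys.exit(0)
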
"""
The purpose of this workflow is to test the mechanism for generating SDP
receive addresses from the channel link map contained in the SBI. The
workflow reads the channel link map from the SBI, uses it to generate the
receive addresses for each scan type, and writes them to the processing
block state. The subarray then publishes this address map on the
appropriate attribute to complete the transition following AssignResources.

This workflow does not generate any deployments.
"""

import logging
import ska_ser_logging
from ska_sdp_workflow import workflow

ska_ser_logging.configure_logging()
LOG = logging.getLogger("test_receive_addresses")
LOG.setLevel(logging.DEBUG)

# Claim processing block
pb = workflow.ProcessingBlock()

# Default maximum number of channels per receive process
max_channels = 20

# Port configuration
port_start = 9000
num_ports = 1

# Get the channel link map from SBI
scan_types = pb.get_scan_types()
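
The snippet ends where the address map would be generated. A minimal sketch of
how such a map could be built from ``scan_types`` (the field names and
host/port layout below are assumptions for illustration; the real workflow
derives the addresses from the channel link map and writes them to the
processing block state):

# Illustrative only: field names ("id", "channels", "count") and host
# addresses are assumed, not taken from the real algorithm.
receive_addresses = {}
for scan_type in scan_types:
    num_channels = sum(c.get("count", 0) for c in scan_type.get("channels", []))
    # One receive process per block of max_channels channels.
    num_processes = -(-num_channels // max_channels)  # ceiling division
    receive_addresses[scan_type["id"]] = {
        "host": [[i * max_channels, f"192.168.0.{i + 1}"] for i in range(num_processes)],
        "port": [[i * max_channels, port_start + i, num_ports] for i in range(num_processes)],
    }
LOG.debug("Generated receive addresses: %s", receive_addresses)
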
"""
Reading oet.ini file value and initializing constant of feature toggle with enabling
event based polling/pubsub
"""
import os.path

from pkg_resources import resource_filename
from pubsub import pub
from ska_ser_logging import configure_logging
from tblib import pickling_support

import oet.event.topics

from .features import Features

# Set pypubsub to throw an error if topic in sendMessage does not correspond
# to a topic in the topic tree defined in oet.event.topics
pub.setTopicUnspecifiedFatal(True)

# Load the topic tree definition
pub.addTopicDefnProvider(oet.event.topics, pub.TOPIC_TREE_FROM_CLASS)

configure_logging()
pickling_support.install()

FEATURES = Features.create_from_config_files(
    os.path.expanduser("~/oet.ini"), resource_filename(__name__, "oet.ini")
)
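
For illustration, a sketch of subscribing and publishing under this
configuration. The topic name ``request.procedure.list`` is hypothetical:
with ``setTopicUnspecifiedFatal(True)``, any topic used here must exist in
the ``oet.event.topics`` tree, otherwise pypubsub raises an error instead of
creating the topic on the fly:

def on_request(msg_src, **kwargs):
    # Hypothetical listener; real listeners must match the message data
    # specification that oet.event.topics defines for the topic.
    print(f"request from {msg_src}: {kwargs}")

pub.subscribe(on_request, "request.procedure.list")
pub.sendMessage("request.procedure.list", msg_src="example")
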
    "SDP_CHART_REPO_URL",
    "https://gitlab.com/ska-telescope/sdp/ska-sdp-helmdeploy-charts/-/raw/master/chart-repo/",
)
CHART_REPO_REFRESH = int(os.getenv("SDP_CHART_REPO_REFRESH", "300"))
LOG_LEVEL = os.getenv("SDP_LOG_LEVEL", "DEBUG")

# Name to use for the Helm deployer's own repository
CHART_REPO_NAME = "helmdeploy"
# Chart repositories to use, as a list of (name, url) pairs
CHART_REPO_LIST = [
    (CHART_REPO_NAME, CHART_REPO_URL),
    ("dask", "https://helm.dask.org/"),
]

# Initialise logger.
configure_logging(level=LOG_LEVEL)
log = logging.getLogger(__name__)


def invoke(*cmd_line):
    """
    Invoke a command with the given command line.

    :returns: output of the command
    :raises: ``subprocess.CalledProcessError`` if command returns an error status

    """
    # Perform call
    log.debug(" ".join(["$"] + list(cmd_line)))
    # check=True raises subprocess.CalledProcessError on a non-zero exit
    # status, matching the docstring above; stdout is captured and returned.
    result = subprocess.run(
        cmd_line, stdout=subprocess.PIPE, check=True
    )
    return result.stdout.decode("utf-8")
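
A hypothetical usage of ``invoke``, registering the chart repositories
configured above (assumes the ``helm`` binary is on the PATH):

for name, url in CHART_REPO_LIST:
    # Illustrative only: add each configured repository to helm.
    invoke("helm", "repo", "add", name, url)
    log.info("Registered chart repository %s", name)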