Example #1
                "tags": [
                    "computer"
                ],
                "icon_file": "/var/lib/domogik//domogik_packages/plugin_diskfree/design/icon.png",
                "domogik_min_version": "0.4.0",
                "package_id": "plugin-diskfree",
                "dependencies": [
                    
                ],
                "version": "1.0",
                "type": "plugin",
                "name": "diskfree"
            },
            "json_version": 2
        }
    }'
]"""

import zmq
from zmq.eventloop.ioloop import IOLoop
from domogikmq.reqrep.client import MQSyncReq
from domogikmq.message import MQMessage

cli = MQSyncReq(zmq.Context())
msg = MQMessage()
msg.set_action('device_types.get')
#msg.add_data('device_type', 'diskfree.disk_usage')
print(cli.request('manager', msg.get(), timeout=10).get())
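The commented-out add_data line shows how to narrow the query to a single device type; the same request with the filter enabled (using the diskfree.disk_usage type implied by the plugin metadata above) would look like:

cli = MQSyncReq(zmq.Context())
msg = MQMessage()
msg.set_action('device_types.get')
msg.add_data('device_type', 'diskfree.disk_usage')  # restrict the reply to one device type
print(cli.request('manager', msg.get(), timeout=10).get())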


Example #2
import os
import zmq

def main():
  context = zmq.Context()
  socket = context.socket(zmq.REQ)
  socket.connect(os.environ["QP_RUN_ADDRESS"])

  def send(msg, expected):
    print("Send  : ", msg)
    socket.send_string(msg)
    reply = socket.recv_string()
    print("Reply   : ", ':' + reply + ':')
    if reply != expected:
      print("Expected: ", ':' + expected + ':')
    print("")
    assert reply == expected

  send("new_job ao_integrals tcp://130.120.229.139:12345 inproc://ao_integrals",
       "ok")
  send("new_job ao_integrals tcp://130.120.229.139:12345 inproc://ao_integrals",
       "error A job is already running")

#  send("connect","error Message not understood : connect")

  send("connect  tcp","connect_reply ao_integrals 1 tcp://130.120.229.139:12345")
  send("connect  inproc","connect_reply ao_integrals 2 inproc://ao_integrals")
  send("disconnect ao_integrals 3","error Queuing_system.ml:68:2 : disconnect ao_integrals 3")
  send("disconnect ao_integrals 2","disconnect_reply ao_integrals")
  send("connect  inproc","connect_reply ao_integrals 3 inproc://ao_integrals")

  send("add_task ao_integrals triangle 3", "ok")
  send("add_task ao_integrals range 4 7", "ok")

  for i in range(8,11):
     send("add_task ao_integrals %d %d"%(i,i+10), "ok")

  send("get_task ao_integrals 3", "get_task_reply 10 10 20")
  send("get_task ao_integrals 3", "get_task_reply 9 9 19")
  send("get_task ao_integrals 3", "get_task_reply 8 8 18")

  send("task_done ao_integrals 3 10", "ok")
  send("task_done ao_integrals 3 9", "ok")
  send("task_done ao_integrals 3 8", "ok")
  send("del_task ao_integrals 10", "del_task_reply more 10")
  send("del_task ao_integrals 9", "del_task_reply more 9")
  send("del_task ao_integrals 8", "del_task_reply more 8")
  send("del_task ao_integrals 10", "error Task 10 is already deleted : del_task ao_integrals 10")

  send("get_task ao_integrals 1", "get_task_reply 7 4")
  send("get_task ao_integrals 3", "get_task_reply 6 5")
  send("get_task ao_integrals 1", "get_task_reply 5 6")
  send("get_task ao_integrals 3", "get_task_reply 4 7")
  send("get_task ao_integrals 3", "get_task_reply 3 1 3")
  send("get_task ao_integrals 1", "get_task_reply 2 2 3")
  send("get_task ao_integrals 1", "get_task_reply 1 3 3")

  send("task_done ao_integrals 1 1", "ok")
  send("task_done ao_integrals 1 2", "ok")
  send("task_done ao_integrals 3 3", "ok")
  send("task_done ao_integrals 3 4", "ok")
  send("task_done ao_integrals 1 5", "ok")
  send("task_done ao_integrals 1 6", "error Queuing_system.ml:81:30 : task_done ao_integrals 1 6")
  send("task_done ao_integrals 3 6", "ok")
  send("task_done ao_integrals 1 7", "ok")

  send("del_task ao_integrals 1", "del_task_reply more 1")
  send("del_task ao_integrals 2", "del_task_reply more 2")
  send("del_task ao_integrals 3", "del_task_reply more 3")
  send("del_task ao_integrals 4", "del_task_reply more 4")
  send("del_task ao_integrals 5", "del_task_reply more 5")
  send("del_task ao_integrals 6", "del_task_reply more 6")
  send("del_task ao_integrals 7", "del_task_reply done 7")

  send("end_job ao_integrals","ok")
  send("end_job ao_integrals","error No job is running")
  send("terminate","ok")
Example #3
def thermald_thread():
    setup_eon_fan()

    # prevent LEECO from undervoltage
    BATT_PERC_OFF = 10 if LEON else 3

    # now loop
    context = zmq.Context()
    thermal_sock = messaging.pub_sock(context, service_list['thermal'].port)
    health_sock = messaging.sub_sock(context, service_list['health'].port)
    location_sock = messaging.sub_sock(context,
                                       service_list['gpsLocation'].port)
    fan_speed = 0
    count = 0

    off_ts = None
    started_ts = None
    ignition_seen = False
    started_seen = False
    passive_starter = LocationStarter()
    thermal_status = ThermalStatus.green
    health_sock.RCVTIMEO = 1500

    params = Params()

    while 1:
        td = messaging.recv_sock(health_sock, wait=True)
        location = messaging.recv_sock(location_sock)
        location = location.gpsLocation if location else None
        msg = read_thermal()

        # loggerd is gated based on free space
        statvfs = os.statvfs(ROOT)
        avail = (statvfs.f_bavail * 1.0) / statvfs.f_blocks

        # thermal message now also includes free space
        msg.thermal.freeSpace = avail
        with open("/sys/class/power_supply/battery/capacity") as f:
            msg.thermal.batteryPercent = int(f.read())
        with open("/sys/class/power_supply/battery/status") as f:
            msg.thermal.batteryStatus = f.read().strip()
        with open("/sys/class/power_supply/battery/current_now") as f:
            msg.thermal.batteryCurrent = int(f.read())
        with open("/sys/class/power_supply/battery/voltage_now") as f:
            msg.thermal.batteryVoltage = int(f.read())
        with open("/sys/class/power_supply/usb/online") as f:
            msg.thermal.usbOnline = bool(int(f.read()))

        # TODO: add car battery voltage check
        max_cpu_temp = max(msg.thermal.cpu0, msg.thermal.cpu1,
                           msg.thermal.cpu2, msg.thermal.cpu3) / 10.0
        max_comp_temp = max(max_cpu_temp, msg.thermal.mem / 10.,
                            msg.thermal.gpu / 10.)
        bat_temp = msg.thermal.bat / 1000.
        fan_speed = handle_fan(max_cpu_temp, bat_temp, fan_speed)
        msg.thermal.fanSpeed = fan_speed

        # thermal logic with hysteresis
        if max_cpu_temp > 107. or bat_temp >= 63.:
            # onroad not allowed
            thermal_status = ThermalStatus.danger
        elif max_comp_temp > 95. or bat_temp > 60.:
            # hysteresis between onroad not allowed and engage not allowed
            thermal_status = clip(thermal_status, ThermalStatus.red,
                                  ThermalStatus.danger)
        elif max_cpu_temp > 90.0:
            # hysteresis between engage not allowed and uploader not allowed
            thermal_status = clip(thermal_status, ThermalStatus.yellow,
                                  ThermalStatus.red)
        elif max_cpu_temp > 85.0:
            # uploader not allowed
            thermal_status = ThermalStatus.yellow
        elif max_cpu_temp > 75.0:
            # hysteresis between uploader not allowed and all good
            thermal_status = clip(thermal_status, ThermalStatus.green,
                                  ThermalStatus.yellow)
        else:
            # all good
            thermal_status = ThermalStatus.green

        # **** starting logic ****

        # start constellation of processes when the car starts
        ignition = td is not None and td.health.started
        ignition_seen = ignition_seen or ignition

        # add voltage check for ignition
        if not ignition_seen and td is not None and td.health.voltage > 13500:
            ignition = True

        do_uninstall = params.get("DoUninstall") == "1"
        accepted_terms = params.get("HasAcceptedTerms") == "1"
        completed_training = params.get(
            "CompletedTrainingVersion") == training_version

        should_start = ignition

        # have we seen a panda?
        passive = (params.get("Passive") == "1")

        # start on gps movement if we haven't seen ignition and are in passive mode
        should_start = should_start or (
            not (ignition_seen and td)  # seen ignition and panda is connected
            and passive and passive_starter.update(started_ts, location))

        # with 2% left, we killall, otherwise the phone will take a long time to boot
        should_start = should_start and msg.thermal.freeSpace > 0.02

        # require usb power in passive mode
        should_start = should_start and (not passive or msg.thermal.usbOnline)

        # confirm we have completed training and aren't uninstalling
        should_start = should_start and accepted_terms and (
            passive or completed_training) and (not do_uninstall)

        # if any CPU gets above 107 or the battery gets above 63, kill all processes
        # controls will warn with CPU above 95 or battery above 60
        if thermal_status >= ThermalStatus.danger:
            # TODO: Add a better warning when this is happening
            should_start = False

        if should_start:
            off_ts = None
            if started_ts is None:
                params.car_start()
                started_ts = sec_since_boot()
                started_seen = True
        else:
            started_ts = None
            if off_ts is None:
                off_ts = sec_since_boot()

            # shut down if the battery drops below the threshold while discharging,
            # we had been running before, and we've been off for more than a minute
            if msg.thermal.batteryPercent < BATT_PERC_OFF and msg.thermal.batteryStatus == "Discharging" and \
               started_seen and (sec_since_boot() - off_ts) > 60:
                os.system('LD_LIBRARY_PATH="" svc power shutdown')

        msg.thermal.started = started_ts is not None
        msg.thermal.startedTs = int(1e9 * (started_ts or 0))

        msg.thermal.thermalStatus = thermal_status
        thermal_sock.send(msg.to_bytes())
        print(msg)

        # report to server once per minute
        if (count % 60) == 0:
            cloudlog.event("STATUS_PACKET",
                           count=count,
                           health=(td.to_dict() if td else None),
                           location=(location.to_dict() if location else None),
                           thermal=msg.to_dict())

        count += 1
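The hysteresis above relies on a clip helper that bounds the previous status between two levels; assuming the usual (value, low, high) semantics, a stand-in is:

def clip(x, lo, hi):
    # constrain x to the closed interval [lo, hi]
    return max(lo, min(hi, x))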
Example #4
def __init__(self, address="tcp://*:5555", data=[]):
    self.data = data
    self.context = zmq.Context()
    self.socket = self.context.socket(zmq.REP)
    self.socket.bind(address)

def _prefix_mgr_server():
    prefix_mgr_server = PrefixMgr(zmq.Context(), zmq_socket_url)
    for _ in range(num_req):
        prefix_mgr_server.process_request()
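The REP socket bound above serves one request at a time; a minimal REQ client to poke it (address assumed to match the default tcp://*:5555 bind) might be:

import zmq

context = zmq.Context()
req = context.socket(zmq.REQ)
req.connect("tcp://127.0.0.1:5555")  # assumes the default bind address above
req.send(b"ping")
print(req.recv())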
Example #6
def main():
    verbose = '-v' in sys.argv
    ctx = zmq.Context()

    # Create MDP client session with short timeout
    # this client is used by service_success method
    client = MajorDomoClient("tcp://localhost:5555", verbose)
    client.timeout = 1000  # 1 sec
    client.retries = 1  # only 1 retry

    request_pipe, peer = zpipe(ctx)
    request_thread = threading.Thread(target=titanic_request,
                                      args=(
                                          peer,
                                          verbose,
                                      ))
    request_thread.daemon = True
    request_thread.start()
    reply_thread = threading.Thread(target=titanic_reply, args=(verbose, ))
    reply_thread.daemon = True
    reply_thread.start()
    close_thread = threading.Thread(target=titanic_close, args=(verbose, ))
    close_thread.daemon = True
    close_thread.start()

    poller = zmq.Poller()
    poller.register(request_pipe, zmq.POLLIN)

    # Ensure message directory exists
    TITANIC_DIR.mkdir(parents=True, exist_ok=True)
    # create the dispatcher queue file, if not present
    queue = TITANIC_DIR.joinpath('queue')
    queue.touch()

    # Main dispatcher loop
    while True:
        # We'll dispatch once per second, if there's no activity
        try:
            items = poller.poll(1000)
        except KeyboardInterrupt:
            break  # Interrupted

        if items:
            # Append UUID to queue, prefixed with '-' for pending
            uuid = request_pipe.recv()
            with open(queue, 'a') as f:
                f.write(f"-{uuid.decode()}\n")

        # Brute-force dispatcher
        with open(queue, 'r+b') as f:
            for entry in f.readlines():
                entry = entry.decode()
                # UUID is prefixed with '-' if still waiting
                if entry[0] == '-':
                    uuid = entry[1:].rstrip()  # rstrip '\n' etc.
                    print(f"I: processing request {uuid}")
                    if service_success(client, uuid):
                        # mark queue entry as processed
                        here = f.tell()
                        f.seek(-1 * len(entry), os.SEEK_CUR)
                        f.write(b'+')
                        f.seek(here, os.SEEK_SET)
                        print(f"completed {uuid}")
Example #7
def _monitor_server():
    monitor_server = Monitor(zmq.Context(), "tcp://*:5000")
    for _ in range(num_req):
        monitor_server.process_request()
Example #8
File: zmqbus.py  Project: kervi/kervi
    def reset_bus(self, process_id, signal_port, ip=None, root_address=None, event_port=None):
        self._handlers = NamedLists()
        self._process_id = process_id
        self._query_id_count = 0
        self._uuid_handler = uuid.uuid4().hex
        self._is_root = (root_address is None)
        self._root_address = root_address
        self._signal_address = "tcp://" + ip + ":" + str(signal_port)
        self._context = zmq.Context()
        self._response_events = []
        self._last_ping = time.time()
        self._connections_lock = threading.Lock()

        self._linked_handlers = []
        self._linked_response_handlers = []

        self._message_threads = []
        self._message_thread = 0
        self._message_threads_lock = threading.Lock()
        for i in range(5):
            self._message_threads += [ZMQHandlerThread(self)]

        self._root_event = None
        self._event_socket = self._context.socket(zmq.PUB)
        self._event_socket.bind(_KERVI_EVENT_ADDRESS)

        self._stream_socket = self._context.socket(zmq.PUB)
        self._stream_socket.bind(_KERVI_STREAM_ADDRESS)

        self._command_socket = self._context.socket(zmq.PUB)
        self._command_socket.bind(_KERVI_COMMAND_ADDRESS)

        self._query_socket = self._context.socket(zmq.PUB)
        self._query_socket.bind(_KERVI_QUERY_ADDRESS)

        self._message_handler = ZMQMessageThread(self, self._signal_address, True)
        self._query_handler = ZMQMessageThread(self, _KERVI_QUERY_ADDRESS)
        self._event_handler = ZMQMessageThread(self, _KERVI_EVENT_ADDRESS)
        self._stream_handler = ZMQMessageThread(self, _KERVI_STREAM_ADDRESS)
        self._command_handler = ZMQMessageThread(self, _KERVI_COMMAND_ADDRESS)

        self._register_handler("signal:ping", self._on_ping)
        self._message_handler.register("signal:ping")
        self._message_handler.register("signal:exit")
        
        self._message_handler.register("queryResponse")
        self._message_handler.register("query:")

        self._query_handler.register("query:")

        self._message_handler.register("signal:exit")
        self._query_handler.register("signal:exit")
        self._event_handler.register("signal:exit")
        self._stream_handler.register("signal:exit")
        self._command_handler.register("signal:exit")

        self._ping_thread = ZMQPingThread(self)
        self._command_lock = threading.Lock()
        self._event_lock = threading.Lock()
        self._stream_lock = threading.Lock()
        self._query_lock = threading.Lock()

        self._observed_streams = []

        self.register_query_handler("GetRoutingInfo", self._get_routing_info)
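Each bus channel above is a PUB socket bound to an inproc address with a ZMQMessageThread subscribing by topic prefix; stripped of the kervi wrappers, the underlying pattern is just (addresses assumed for illustration):

import zmq

context = zmq.Context()
pub = context.socket(zmq.PUB)
pub.bind("inproc://kervi_events")  # stand-in for _KERVI_EVENT_ADDRESS

sub = context.socket(zmq.SUB)  # inproc requires the same Context instance
sub.connect("inproc://kervi_events")
sub.setsockopt_string(zmq.SUBSCRIBE, "signal:")  # prefix filter, like register("signal:ping")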
Example #9
    def __init__(
        self,
        client_address="127.0.0.1",
        interchange_address="127.0.0.1",
        client_ports=(50055, 50056, 50057),
        worker_ports=None,
        worker_port_range=(54000, 55000),
        hub_address=None,
        hub_port=None,
        heartbeat_threshold=60,
        logdir=".",
        logging_level=logging.INFO,
        poll_period=10,
    ) -> None:
        """
        Parameters
        ----------
        client_address : str
             The ip address at which the parsl client can be reached. Default: "127.0.0.1"

        interchange_address : str
             The ip address at which the workers will be able to reach the Interchange. Default: "127.0.0.1"

        client_ports : triple(int, int, int)
             The ports at which the client can be reached

        worker_ports : tuple(int, int)
             The specific two ports at which workers will connect to the Interchange. Default: None

        worker_port_range : tuple(int, int)
             The interchange picks ports at random from the range which will be used by workers.
             This is overridden when the worker_ports option is set. Default: (54000, 55000)

        hub_address : str
             The ip address at which the interchange can send info about managers to when monitoring is enabled.
             This is passed via dfk and executor automatically. Default: None (meaning monitoring disabled)

        hub_port : str
             The port at which the interchange can send info about managers to when monitoring is enabled.
             This is passed via dfk and executor automatically. Default: None (meaning monitoring disabled)

        heartbeat_threshold : int
             Number of seconds since the last heartbeat after which worker is considered lost.

        logdir : str
             Parsl log directory paths. Logs and temp files go here. Default: '.'

        logging_level : int
             Logging level as defined in the logging module. Default: logging.INFO (20)

        poll_period : int
             The main thread polling period, in milliseconds. Default: 10ms

        """
        self.logdir = logdir
        os.makedirs(self.logdir, exist_ok=True)

        start_file_logger("{}/interchange.log".format(self.logdir),
                          level=logging_level)
        logger.propagate = False
        logger.debug("Initializing Interchange process")

        self.client_address = client_address
        self.interchange_address = interchange_address
        self.poll_period = poll_period

        logger.info(
            "Attempting connection to client at {} on ports: {},{},{}".format(
                client_address, client_ports[0], client_ports[1],
                client_ports[2]))
        self.context = zmq.Context()
        self.task_incoming = self.context.socket(zmq.DEALER)
        self.task_incoming.set_hwm(0)
        self.task_incoming.RCVTIMEO = 10  # in milliseconds
        self.task_incoming.connect("tcp://{}:{}".format(
            client_address, client_ports[0]))
        self.results_outgoing = self.context.socket(zmq.DEALER)
        self.results_outgoing.set_hwm(0)
        self.results_outgoing.connect("tcp://{}:{}".format(
            client_address, client_ports[1]))

        self.command_channel = self.context.socket(zmq.REP)
        self.command_channel.RCVTIMEO = 1000  # in milliseconds
        self.command_channel.connect("tcp://{}:{}".format(
            client_address, client_ports[2]))
        logger.info("Connected to client")

        self.hub_address = hub_address
        self.hub_port = hub_port

        self.pending_task_queue: queue.Queue[Any] = queue.Queue(maxsize=10**6)

        self.worker_ports = worker_ports
        self.worker_port_range = worker_port_range

        self.task_outgoing = self.context.socket(zmq.ROUTER)
        self.task_outgoing.set_hwm(0)
        self.results_incoming = self.context.socket(zmq.ROUTER)
        self.results_incoming.set_hwm(0)

        if self.worker_ports:
            self.worker_task_port = self.worker_ports[0]
            self.worker_result_port = self.worker_ports[1]

            self.task_outgoing.bind("tcp://*:{}".format(self.worker_task_port))
            self.results_incoming.bind("tcp://*:{}".format(
                self.worker_result_port))

        else:
            self.worker_task_port = self.task_outgoing.bind_to_random_port(
                'tcp://*',
                min_port=worker_port_range[0],
                max_port=worker_port_range[1],
                max_tries=100)
            self.worker_result_port = self.results_incoming.bind_to_random_port(
                'tcp://*',
                min_port=worker_port_range[0],
                max_port=worker_port_range[1],
                max_tries=100)

        logger.info(
            "Bound to ports {},{} for incoming worker connections".format(
                self.worker_task_port, self.worker_result_port))

        self._ready_managers: Dict[bytes, ManagerRecord] = {}

        self.heartbeat_threshold = heartbeat_threshold

        self.current_platform = {
            'parsl_v': PARSL_VERSION,
            'python_v': "{}.{}.{}".format(sys.version_info.major,
                                          sys.version_info.minor,
                                          sys.version_info.micro),
            'os': platform.system(),
            'hostname': platform.node(),
            'dir': os.getcwd()
        }

        logger.info("Platform info: {}".format(self.current_platform))
Example #10
def main():
    global example_dict
    global options
    global zmqSock

    defaultvar_configparser = '''\
[PMGRPCD]
topic = some.thing.is.topic-avro
bsservers = kafka.some.thing.net:9093
urlscreg =  https://schema-registry.some.thing.net:443
calocation = /some/thing/to/schema/registry/ssl/something_root_ca.crt
secproto = ssl
sslcertloc = /some/thing/to/ssl/certificate/location/something.crt
sslkeyloc = /some/thing/to/ssl/key/location/something.key
gpbmapfile = /etc/pmacct/telemetry/gpbmapfile.map
avscmapfile = /etc/pmacct/telemetry/schema_id_map_file.json
mitigation = True
debug = False
pmgrpcdlogfile = /var/log/pmgrpcd.log
serializelogfile = /var/log/pmgrpcd_avro.log
ipport = [::]:10000
workers = 20
cisco = True
huawei = True
example = True
examplepath = /tmp/stexamples
jsondatadumpfile = /tmp/stexamples/jsondatadumpfile.json
rawdatadumpfile = /tmp/stexamples/rawdatadumpfile.json
zmq = False
zmqipport = tcp://127.0.0.1:50000
kafkaavro = True
onlyopenconfig = False
'''

    default_gpbmapfile = '''\
huawei-ifm            =  huawei_ifm_pb2.Ifm()
huawei-devm           =  huawei_devm_pb2.Devm()
openconfig-interfaces =  openconfig_interfaces_pb2.Interfaces()
'''

    default_scidmapfile = '''\
{
  "10.215.133.15": {
    "openconfig-interfaces:interfaces": 249
    "openconfig-platform:components": 365
  },
  "10.215.133.17": {
    "openconfig-interfaces:interfaces": 299
  }
}
'''

    default_mitigationscript = '''\
#!/usr/bin/env python3.7
#
from datetime import datetime
import pprint
global mitigation
mitigation = {}

def mod_all_json_data(resdict):
  global mitigation
  mitigation = resdict.copy()

  if "collector" in mitigation:
    if ("grpc" in mitigation["collector"]) and ("data" in mitigation["collector"]):
      if "ne_vendor" in mitigation["collector"]["grpc"]:
        mod_all_pre()
  #      if mitigation["collector"]["grpc"]["ne_vendor"] == "Huawei":
  #        mod_huawei()
  #      elif mitigation["collector"]["grpc"]["ne_vendor"] == "Cisco":
  #        mod_cisco()
  #      mod_all_post()
  return mitigation

def mod_all_pre():
  global mitigation
  pass

if __name__ == '__mod_all_json_data__':
'''

    usage_str = "%prog [options]"
    version_str = "%prog " + SCRIPTVERSION
    parser = OptionParser(usage=usage_str, version=version_str)

    config = configparser.ConfigParser()
    if os.path.isfile(CONFIGFILE):
        config.read(CONFIGFILE)
        if 'PMGRPCD' not in config.sections():
            # add Section PMGRPCD to the configfile
            print("Add Section PMGRPCD to the Configfile %s" % CONFIGFILE)
            with open(CONFIGFILE, 'a') as configf:
                configf.write(defaultvar_configparser)
            config.read(CONFIGFILE)
    else:
        with open(CONFIGFILE, 'w') as configf:
            configf.write(defaultvar_configparser)
        config.read(CONFIGFILE)

    if not os.path.isfile(GPBMAPFILE):
        with open(GPBMAPFILE, 'w') as gpbmapf:
            gpbmapf.write(default_gpbmapfile)

    if not os.path.isfile(SCIDMAPFILE):
        with open(SCIDMAPFILE, 'w') as scidmapf:
            scidmapf.write(default_scidmapfile)

    if not os.path.isfile(MITIGATIONSCRIPT):
        with open(MITIGATIONSCRIPT, 'w') as mitigf:
            mitigf.write(default_mitigationscript)

    parser.add_option("-T",
                      "--topic",
                      default=config.get("PMGRPCD", 'topic'),
                      dest="topic",
                      help="the json data are serialized to this topic")
    parser.add_option("-B",
                      "--bsservers",
                      default=config.get("PMGRPCD", 'bsservers'),
                      dest="bsservers",
                      help="bootstrap servers url with port to reach kafka")
    parser.add_option("-S",
                      "--secproto",
                      default=config.get("PMGRPCD", 'secproto'),
                      dest="secproto",
                      help="security protocol (is normaly ssl)")
    parser.add_option("-O",
                      "--sslcertloc",
                      default=config.get("PMGRPCD", 'sslcertloc'),
                      dest="sslcertloc",
                      help="path/file to ssl certification location")
    parser.add_option("-K",
                      "--sslkeyloc",
                      default=config.get("PMGRPCD", 'sslkeyloc'),
                      dest="sslkeyloc",
                      help="path/file to ssl key location")
    parser.add_option("-U",
                      "--urlscreg",
                      default=config.get("PMGRPCD", 'urlscreg'),
                      dest="urlscreg",
                      help="the url to the schema-registry")
    parser.add_option(
        "-L",
        "--calocation",
        default=config.get("PMGRPCD", 'calocation'),
        dest="calocation",
        help="the ca_location used to connect to schema-registry")
    parser.add_option(
        "-G",
        "--gpbmapfile",
        default=config.get("PMGRPCD", 'gpbmapfile'),
        dest="gpbmapfile",
        help="change path/name of gpbmapfile [default: %default]")
    parser.add_option("-M",
                      "--avscmapfile",
                      default=config.get("PMGRPCD", 'avscmapfile'),
                      dest="avscmapfile",
                      help="path/name to the avscmapfile")
    parser.add_option(
        "-m",
        "--mitigation",
        action="store_true",
        default=config.getboolean("PMGRPCD", 'mitigation'),
        dest="mitigation",
        help=
        "enable plugin mitigation mod_result_dict from python module mitigation.py"
    )
    parser.add_option("-d",
                      "--debug",
                      action="store_true",
                      default=config.getboolean("PMGRPCD", 'debug'),
                      dest="debug",
                      help="enable debug messages on the logfile")
    parser.add_option(
        "-l",
        "--pmgrpcdlogfile",
        default=config.get("PMGRPCD", 'pmgrpcdlogfile'),
        dest='pmgrpcdlogfile',
        help=
        "pmgrpcdlogfile the logfile on the collector face with path/name [default: %default]"
    )
    parser.add_option(
        "-a",
        "--serializelogfile",
        default=config.get("PMGRPCD", 'serializelogfile'),
        dest="serializelogfile",
        help=
        "serializelogfile with path/name for kafka avro and zmq messages [default: %default]"
    )
    parser.add_option(
        "-I",
        "--ipport",
        action="store",
        type='string',
        default=config.get("PMGRPCD", 'ipport'),
        dest="ipport",
        help="change the ipport the daemon is listen on [default: %default]")
    parser.add_option(
        "-w",
        "--workers",
        action="store",
        type='int',
        default=config.get("PMGRPCD", 'workers'),
        dest="workers",
        help="change the nr of paralell working processes [default: %default]")
    parser.add_option(
        "-C",
        "--cisco",
        action="store_true",
        default=config.getboolean("PMGRPCD", 'cisco'),
        dest="cisco",
        help="enable the grpc messages comming from Cisco [default: %default]")
    parser.add_option(
        "-H",
        "--huawei",
        action="store_true",
        default=config.getboolean("PMGRPCD", 'huawei'),
        dest="huawei",
        help="enable the grpc messages comming from Huawei [default: %default]"
    )
    parser.add_option(
        "-e",
        "--example",
        action="store_true",
        default=config.getboolean("PMGRPCD", 'example'),
        dest="example",
        help="Enable writing Example Json-Data-Files [default: %default]")
    parser.add_option(
        "-E",
        "--examplepath",
        default=config.get("PMGRPCD", 'examplepath'),
        dest="examplepath",
        help="dump a json example of each proto/path to this examplepath")
    parser.add_option(
        "-j",
        "--jsondatadumpfile",
        dest="jsondatadumpfile",
        help="writing the output to the jsondatadumpfile path/name")
    parser.add_option(
        "-r",
        "--rawdatafile",
        dest="rawdatafile",
        help=
        "writing the raw data from the routers to the rowdatafile path/name")
    parser.add_option("-z",
                      "--zmq",
                      action="store_true",
                      default=config.getboolean("PMGRPCD", 'zmq'),
                      dest="zmq",
                      help="enable forwarding to ZMQ [default: %default]")
    parser.add_option(
        "-p",
        "--zmqipport",
        default=config.get("PMGRPCD", 'zmqipport'),
        dest="zmqipport",
        help="define proto://ip:port of zmq socket bind [default: %default]")
    parser.add_option(
        "-k",
        "--kafkaavro",
        action="store_true",
        default=config.getboolean("PMGRPCD", 'kafkaavro'),
        dest="kafkaavro",
        help=
        "enable forwarding to Kafka kafkaavro (with schema-registry) [default: %default]"
    )
    parser.add_option("-o",
                      "--onlyopenconfig",
                      action="store_true",
                      default=config.getboolean("PMGRPCD", 'onlyopenconfig'),
                      dest="onlyopenconfig",
                      help="only accept pakets of openconfig")
    parser.add_option("-i",
                      "--ip",
                      dest="ip",
                      help="only accept pakets of this single ip")
    parser.add_option(
        "-A",
        "--avscid",
        dest="avscid",
        help=
        "this is to serialize manually with avscid and jsondatafile (for development)"
    )
    parser.add_option(
        "-J",
        "--jsondatafile",
        dest="jsondatafile",
        help=
        "this is to serialize manually with avscid and jsondatafile (for development)"
    )
    parser.add_option(
        "-c",
        "--console",
        action="store_true",
        dest="console",
        help=
        "this is to display all log-messages also on console (for development)"
    )
    parser.add_option("-v",
                      action="store_true",
                      dest="version",
                      help="print version of this script")
    (options, args) = parser.parse_args()

    init_pmgrpcdlog()
    init_serializelog()

    if options.version:
        print(parser.get_version())
        raise SystemExit

    pmgrpcdlog.info("startoptions of this script: %s" % str(options))

    #Test-Statements Logging
    #-----------------------
    #pmgrpcdlog.debug('debug message')
    #pmgrpcdlog.info('info message')
    #pmgrpcdlog.warning('warn message')
    #pmgrpcdlog.error('error message')
    #pmgrpcdlog.critical('critical message')

    #serializelog.debug('debug message')
    #serializelog.info('info message')
    #serializelog.warning('warn message')
    #serializelog.error('error message')
    #serializelog.critical('critical message')

    if options.zmq:
        zmqContext = zmq.Context()
        zmqSock = zmqContext.socket(zmq.PUSH)
        zmqSock.bind(options.zmqipport)

    pmgrpcdlog.info("enable listening to SIGNAL USR1 with Sinalhandler")
    signal.signal(signal.SIGUSR1, signalhandler)
    pmgrpcdlog.info("enable listening to SIGNAL USR2 with Sinalhandler")
    signal.signal(signal.SIGUSR2, signalhandler)

    if (options.avscid and options.jsondatafile):
        pmgrpcdlog.info(
            "manually serialize with avscid (%s) and jsondatafile (%s)" %
            (options.avscid, options.jsondatafile))
        avscid = int(options.avscid)
        avsc = getavroschema(avscid)
        avroinstance = getavro_schid_instance(avscid)
        with open(options.jsondatafile, 'r') as jsondatahandler:
            jsondata = json.load(jsondatahandler)
        #serialize(json.dumps(avsc), jsondata, topic, avscid, avroinstance)
        serialize(jsondata, options.topic, avscid, avroinstance)
    elif (options.avscid or options.jsondatafile):
        pmgrpcdlog.info(
            "manual serialization needs both options avscid and jsondatafile")
        parser.print_help()
    else:
        pmgrpcdlog.info('pmgrpcd.py is started at %s' % (str(datetime.now())))
        serve()
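With --zmq enabled the daemon binds a PUSH socket at zmqipport, so consumers connect with PULL; a minimal receiver for the default tcp://127.0.0.1:50000 could be:

import zmq

context = zmq.Context()
pull = context.socket(zmq.PULL)
pull.connect("tcp://127.0.0.1:50000")  # default zmqipport from the config above
while True:
    print(pull.recv())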
Example #11
# test.py
import zmq, sys, json

sock = zmq.Context().socket(zmq.REQ)
sock.connect("tcp://127.0.0.1:5556")
sock.send_string(json.dumps([{"src": " ".join(sys.argv[1:])}]))
print(sock.recv_string())
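The client expects a REP service on port 5556; for local testing, a stand-in endpoint that just echoes the JSON request back (an assumption, not the real service) is:

import zmq

sock = zmq.Context().socket(zmq.REP)
sock.bind("tcp://127.0.0.1:5556")
while True:
    sock.send(sock.recv())  # echo the request back unchanged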
Example #12
def main():
    """ main method """
    global sid, wanip

    # Prepare our context and publisher
    context = zmq.Context()

    subscriber = context.socket(zmq.SUB)
    subscriber.connect("tcp://61.142.208.98:5563")
    subscriber.setsockopt(zmq.SUBSCRIBE, b"")

    feedback = context.socket(zmq.PUSH)
    feedback.connect("tcp://61.142.208.98:5564")

    broadcast_timeout = 60
    broadcast_lasttime = time.time()

    poll_timeout = 3
    tasks = {}

    while True:
        # the server may have restarted; reconnect if no broadcast arrives in time
        if time.time() - broadcast_lasttime > broadcast_timeout * 3:
            print('connect to pub again')
            broadcast_lasttime = time.time()
            # no broadcast received within the liveness window
            subscriber.close()
            feedback.close()
            # create the sockets again
            subscriber = context.socket(zmq.SUB)
            subscriber.connect("tcp://61.142.208.98:5563")
            subscriber.setsockopt(zmq.SUBSCRIBE, b"")
            feedback = context.socket(zmq.PUSH)
            feedback.connect("tcp://61.142.208.98:5564")

        #poll task result
        shell_start_time = time.time()
        while time.time() - shell_start_time < poll_timeout:
            taskids = list(tasks.keys())
            for taskid in taskids:
                task = tasks[taskid]
                print(taskid, task)
                try:
                    ret = task['fd'].poll()
                    print(ret)
                    if ret is None:
                        if time.time() - tasks[taskid]['runtime'] > tasks[
                                taskid]['timeout']:
                            task['fd'].kill()
                            task['fd'].wait()
                            del tasks[taskid]
                        continue
                    else:
                        taskret = task['fd'].stdout.read()
                        taskerr = task['fd'].stderr.read()
                        print(taskret, taskerr)
                        feedback.send_multipart([
                            sid, wanip, task['type'], taskid, taskret, taskerr
                        ])
                        del tasks[taskid]
                except Exception as e:
                    print(e)
            time.sleep(0.5)

        #recv task
        try:
            [sids, tasktype, timeout, taskid,
             taskdata] = subscriber.recv_multipart(zmq.DONTWAIT)
        #except zmq.Again:
        #	time.sleep(1)
        #	continue
        except zmq.ZMQError as e:
            if e.errno == zmq.EAGAIN:
                time.sleep(1)
            else:
                print(e)
            continue
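On current pyzmq the commented-out variant is the cleaner idiom: a non-blocking recv raises zmq.Again (a ZMQError subclass whose errno is EAGAIN) when nothing is queued, so the handler reduces to a sketch like this, reusing the subscriber socket from the example:

try:
    parts = subscriber.recv_multipart(zmq.DONTWAIT)
except zmq.Again:
    time.sleep(1)  # nothing pending; back off and poll again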
Example #13
def __init__(self):
    Thread.__init__(self)
    self.context = zmq.Context()
                        found = 1
                        break
    if (found == 1):
        table.loc[table['ID']==table.iloc[i]['ID'],'Dfree'] = json.dumps(isfree)
    csvfile=table.to_csv()
    mem1.write(str(csvfile))    
    sem1.release()
    #print(download_from_machine)
    return download_from_machine


if __name__ == "__main__":      
    id = 1334
    global lu1,mem1,sem1
    [lu1,mem1,sem1] = init(id)
    masterContext = zmq.Context()
    masterSocket = masterContext.socket(zmq.REP)
    with open('master_tracker_config.json') as f:
        replica_factor = json.load(f)['R']
    portMaster = sys.argv[1]
    #print(replica_factor)
    #masterSocket.bind("tcp://*:%s" % portMaster)
    #randports_acquire()
    #free_test = {'IPv4':'192.168.17.5','Type':'D','Port':6001}
    #port_freeing(free_test)
    #free_test = {'IPv4':'192.168.17.5','Type':'D','Port':6000}
    #port_freeing(free_test)
    #append_file_todk({'IPv4':'192.168.17.3','Type':'D','Port':6000,'Filename':'zmq.mp4'})
    i = 0
    #download()
    masterContext = zmq.Context()
Example #15
def radard_thread(gctx=None):
    set_realtime_priority(1)

    # wait for stats about the car to come in from controls
    cloudlog.info("radard is waiting for CarParams")
    CP = car.CarParams.from_bytes(Params().get("CarParams", block=True))
    VM = VehicleModel(CP)
    cloudlog.info("radard got CarParams")

    # import the radar from the fingerprint
    cloudlog.info("radard is importing %s", CP.radarName)
    exec('from selfdrive.radar.' + CP.radarName +
         '.interface import RadarInterface')

    context = zmq.Context()

    # *** subscribe to features and model from visiond
    model = messaging.sub_sock(context, service_list['model'].port)
    live100 = messaging.sub_sock(context, service_list['live100'].port)

    PP = PathPlanner()
    RI = RadarInterface()

    last_md_ts = 0
    last_l100_ts = 0

    # *** publish live20 and liveTracks
    live20 = messaging.pub_sock(context, service_list['live20'].port)
    liveTracks = messaging.pub_sock(context, service_list['liveTracks'].port)

    path_x = np.arange(0.0, 140.0, 0.1)  # 140 meters is max

    # Time-alignment
    rate = 20.  # model and radar are both at 20Hz
    tsv = 1. / rate
    v_len = 20  # how many speed data points to remember for t alignment with rdr data

    enabled = 0
    steer_angle = 0.

    tracks = defaultdict(dict)

    # Kalman filter stuff:
    ekfv = EKFV1D()
    speedSensorV = SimpleSensor(XV, 1, 2)

    # v_ego
    v_ego = None
    v_ego_array = np.zeros([2, v_len])
    v_ego_t_aligned = 0.

    rk = Ratekeeper(rate, print_delay_threshold=np.inf)
    while 1:
        rr = RI.update()

        ar_pts = {}
        for pt in rr.points:
            ar_pts[pt.trackId] = [
                pt.dRel + RDR_TO_LDR, pt.yRel, pt.vRel, pt.aRel, None, False,
                None
            ]

        # receive the live100s
        l100 = messaging.recv_sock(live100)
        if l100 is not None:
            enabled = l100.live100.enabled
            v_ego = l100.live100.vEgo
            steer_angle = l100.live100.angleSteers

            v_ego_array = np.append(v_ego_array,
                                    [[v_ego], [float(rk.frame) / rate]], 1)
            v_ego_array = v_ego_array[:, 1:]

            last_l100_ts = l100.logMonoTime

        if v_ego is None:
            continue

        md = messaging.recv_sock(model)
        if md is not None:
            last_md_ts = md.logMonoTime

        # *** get path prediction from the model ***
        PP.update(v_ego, md)

        # run kalman filter only if prob is high enough
        if PP.lead_prob > 0.7:
            ekfv.update(speedSensorV.read(PP.lead_dist, covar=PP.lead_var))
            ekfv.predict(tsv)
            ar_pts[VISION_POINT] = (float(ekfv.state[XV]),
                                    np.polyval(PP.d_poly,
                                               float(ekfv.state[XV])),
                                    float(ekfv.state[SPEEDV]), np.nan,
                                    last_md_ts, np.nan, sec_since_boot())
        else:
            ekfv.state[XV] = PP.lead_dist
            ekfv.covar = (np.diag([PP.lead_var, ekfv.var_init]))
            ekfv.state[SPEEDV] = 0.
            if VISION_POINT in ar_pts:
                del ar_pts[VISION_POINT]

        # *** compute the likely path_y ***
        if enabled:  # use path from model path_poly
            path_y = np.polyval(PP.d_poly, path_x)
        else:  # use path from steer, set angle_offset to 0 since calibration does not exactly report the physical offset
            path_y = calc_lookahead_offset(v_ego,
                                           steer_angle,
                                           path_x,
                                           VM,
                                           angle_offset=0)[0]

        # *** remove missing points from meta data ***
        for ids in list(tracks.keys()):
            if ids not in ar_pts:
                tracks.pop(ids, None)

        # *** compute the tracks ***
        for ids in ar_pts:
            # ignore the vision point for now
            if ids == VISION_POINT and not VISION_ONLY:
                continue
            elif ids != VISION_POINT and VISION_ONLY:
                continue
            rpt = ar_pts[ids]

            # align v_ego by a fixed time to align it with the radar measurement
            cur_time = float(rk.frame) / rate
            v_ego_t_aligned = np.interp(cur_time - RI.delay, v_ego_array[1],
                                        v_ego_array[0])
            d_path = np.sqrt(
                np.amin((path_x - rpt[0])**2 + (path_y - rpt[1])**2))

            # create the track if it doesn't exist or it's a new track
            if ids not in tracks or rpt[5] == 1:
                tracks[ids] = Track()
            tracks[ids].update(rpt[0], rpt[1], rpt[2], d_path, v_ego_t_aligned)

            # allow the vision model to remove the stationary flag if distance and rel speed roughly match
            if VISION_POINT in ar_pts:
                dist_to_vision = np.sqrt(
                    (0.5 * (ar_pts[VISION_POINT][0] - rpt[0]))**2 +
                    (2 * (ar_pts[VISION_POINT][1] - rpt[1]))**2)
                rel_speed_diff = abs(ar_pts[VISION_POINT][2] - rpt[2])
                tracks[ids].mix_vision(dist_to_vision, rel_speed_diff)

        # publish tracks (debugging)
        dat = messaging.new_message()
        dat.init('liveTracks', len(tracks))

        if DEBUG:
            print("NEW CYCLE")
            if VISION_POINT in ar_pts:
                print("vision", ar_pts[VISION_POINT])

        for cnt, ids in enumerate(tracks.keys()):
            if DEBUG:
                print("id: %4.0f x:  %4.1f  y: %4.1f  v: %4.1f  d: %4.1f s: %1.0f" %
                      (ids, tracks[ids].dRel, tracks[ids].yRel, tracks[ids].vRel,
                       tracks[ids].dPath, tracks[ids].stationary))
            dat.liveTracks[cnt].trackId = ids
            dat.liveTracks[cnt].dRel = float(tracks[ids].dRel)
            dat.liveTracks[cnt].yRel = float(tracks[ids].yRel)
            dat.liveTracks[cnt].vRel = float(tracks[ids].vRel)
            dat.liveTracks[cnt].aRel = float(tracks[ids].aRel)
            dat.liveTracks[cnt].stationary = tracks[ids].stationary
            dat.liveTracks[cnt].oncoming = tracks[ids].oncoming
        liveTracks.send(dat.to_bytes())

        idens = list(tracks.keys())
        track_pts = np.array(
            [tracks[iden].get_key_for_cluster() for iden in idens])

        # If we have multiple points, cluster them
        if len(track_pts) > 1:
            link = linkage_vector(track_pts, method='centroid')
            cluster_idxs = fcluster(link, 2.5, criterion='distance')
            clusters = [None] * max(cluster_idxs)

            for idx in range(len(track_pts)):
                cluster_i = cluster_idxs[idx] - 1

                if clusters[cluster_i] is None:
                    clusters[cluster_i] = Cluster()
                clusters[cluster_i].add(tracks[idens[idx]])
        elif len(track_pts) == 1:
            # TODO: why do we need this?
            clusters = [Cluster()]
            clusters[0].add(tracks[idens[0]])
        else:
            clusters = []

        if DEBUG:
            for i in clusters:
                print(i)
        # *** extract the lead car ***
        lead_clusters = [c for c in clusters if c.is_potential_lead(v_ego)]
        lead_clusters.sort(key=lambda x: x.dRel)
        lead_len = len(lead_clusters)

        # *** extract the second lead from the whole set of leads ***
        lead2_clusters = [
            c for c in lead_clusters if c.is_potential_lead2(lead_clusters)
        ]
        lead2_clusters.sort(key=lambda x: x.dRel)
        lead2_len = len(lead2_clusters)

        # *** publish live20 ***
        dat = messaging.new_message()
        dat.init('live20')
        dat.live20.mdMonoTime = last_md_ts
        dat.live20.canMonoTimes = list(rr.canMonoTimes)
        dat.live20.radarErrors = list(rr.errors)
        dat.live20.l100MonoTime = last_l100_ts
        if lead_len > 0:
            lead_clusters[0].toLive20(dat.live20.leadOne)
            if lead2_len > 0:
                lead2_clusters[0].toLive20(dat.live20.leadTwo)
            else:
                dat.live20.leadTwo.status = False
        else:
            dat.live20.leadOne.status = False

        dat.live20.cumLagMs = -rk.remaining * 1000.
        live20.send(dat.to_bytes())

        rk.monitor_time()
Example #16
import zmq
import math
import time

context = zmq.Context()
# Define the socket using the "Context"
sock = context.socket(zmq.PULL)
sock.connect("tcp://127.0.0.1:5557")

# reuse the single Context; one per process is the ZeroMQ rule of thumb
dashboard = context.socket(zmq.PUSH)
dashboard.connect("tcp://127.0.0.1:3000")
# Pull numbers, compute square roots, and push results to the dashboard
while True:
    # receive the next message from the PULL socket
    message = sock.recv()
    message = message.decode()
    if message:
        reply_msg = "Square root of {} = {}".format(message,
                                                    math.sqrt(int(message)))
        print(reply_msg)
        dashboard.send(reply_msg.encode())
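The PULL side above needs a producer bound to port 5557 feeding it numbers; a minimal sender sketch (port and payloads assumed to match the example) is:

import time
import zmq

context = zmq.Context()
sender = context.socket(zmq.PUSH)
sender.bind("tcp://127.0.0.1:5557")
for n in range(1, 10):
    sender.send(str(n * n).encode())  # integers as text, as the worker expects
    time.sleep(0.1)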
Example #17
    def __init__(
        self,
        _ClientID='dwx-zeromq',  # Unique ID for this client
        _host='localhost',  # Host to connect to
        _protocol='tcp',  # Connection protocol
        _PUSH_PORT=32768,  # Port for Sending commands
        _PULL_PORT=32769,  # Port for Receiving responses
        _SUB_PORT=32770,  # Port for Subscribing for prices
        _delimiter=';',  # String delimiter
        _verbose=True,  # Verbose logging output
        _poll_timeout=10,  # ZMQ Poller Timeout (ms)
        _sleep_delay=0.001,  # 1 ms for time.sleep()
        _monitor=False):  # Experimental ZeroMQ Socket Monitoring

        ######################################################################

        # Strategy Status (if this is False, ZeroMQ will not listen for data)
        self._ACTIVE = True

        # Client ID
        self._ClientID = _ClientID

        # ZeroMQ Host
        self._host = _host

        # Connection Protocol
        self._protocol = _protocol

        # ZeroMQ Context
        self._ZMQ_CONTEXT = zmq.Context()

        # TCP Connection URL Template
        self._URL = self._protocol + "://" + self._host + ":"

        # Ports for PUSH, PULL and SUB sockets respectively
        self._PUSH_PORT = _PUSH_PORT
        self._PULL_PORT = _PULL_PORT
        self._SUB_PORT = _SUB_PORT

        # Create Sockets
        self._PUSH_SOCKET = self._ZMQ_CONTEXT.socket(zmq.PUSH)
        self._PUSH_SOCKET.setsockopt(zmq.SNDHWM, 1)
        self._PUSH_SOCKET_STATUS = {'state': True, 'latest_event': 'N/A'}

        self._PULL_SOCKET = self._ZMQ_CONTEXT.socket(zmq.PULL)
        self._PULL_SOCKET.setsockopt(zmq.RCVHWM, 1)
        self._PULL_SOCKET_STATUS = {'state': True, 'latest_event': 'N/A'}

        self._SUB_SOCKET = self._ZMQ_CONTEXT.socket(zmq.SUB)

        # Connect PUSH Socket used to send commands to MetaTrader
        self._PUSH_SOCKET.connect(self._URL + str(self._PUSH_PORT))
        print("[INIT] Ready to send commands to METATRADER (PUSH): " +
              str(self._PUSH_PORT))

        # Connect PULL Socket to receive command responses from MetaTrader
        self._PULL_SOCKET.connect(self._URL + str(self._PULL_PORT))
        print("[INIT] Listening for responses from METATRADER (PULL): " +
              str(self._PULL_PORT))

        # Connect SUB Socket to receive market data from MetaTrader
        print("[INIT] Listening for market data from METATRADER (SUB): " +
              str(self._SUB_PORT))
        self._SUB_SOCKET.connect(self._URL + str(self._SUB_PORT))

        # Initialize POLL set and register PULL and SUB sockets
        self._poller = zmq.Poller()
        self._poller.register(self._PULL_SOCKET, zmq.POLLIN)
        self._poller.register(self._SUB_SOCKET, zmq.POLLIN)

        # Start listening for responses to commands and new market data
        self._string_delimiter = _delimiter

        # BID/ASK Market Data Subscription Threads ({SYMBOL: Thread})
        self._MarketData_Thread = None

        # Socket Monitor Threads
        self._PUSH_Monitor_Thread = None
        self._PULL_Monitor_Thread = None

        # Market Data Dictionary by Symbol (holds tick data)
        self._Market_Data_DB = {}  # {SYMBOL: {TIMESTAMP: (BID, ASK)}}

        # Current Bid Ask
        self._Curr_Bid_Ask = {}

        # Temporary Order STRUCT for convenience wrappers later.
        self.temp_order_dict = self._generate_default_order_dict()

        # Thread returns the most recently received DATA block here
        self._thread_data_output = None

        # Verbosity
        self._verbose = _verbose

        # ZMQ Poller Timeout
        self._poll_timeout = _poll_timeout

        # Global Sleep Delay
        self._sleep_delay = _sleep_delay

        # Begin polling for PULL / SUB data
        self._MarketData_Thread = Thread(target=self._DWX_ZMQ_Poll_Data_,
                                         args=(
                                             self._string_delimiter,
                                             self._poll_timeout,
                                         ))
        self._MarketData_Thread.daemon = True
        self._MarketData_Thread.start()

        ###########################################
        # Enable/Disable ZeroMQ Socket Monitoring #
        ###########################################
        if _monitor:

            # ZeroMQ Monitor Event Map
            self._MONITOR_EVENT_MAP = {}

            print("\n[KERNEL] Retrieving ZeroMQ Monitor Event Names:\n")

            for name in dir(zmq):
                if name.startswith('EVENT_'):
                    value = getattr(zmq, name)
                    print(f"{value}\t\t:\t{name}")
                    self._MONITOR_EVENT_MAP[value] = name

            print("\n[KERNEL] Socket Monitoring Config -> DONE!\n")

            # Disable PUSH/PULL sockets and let MONITOR events control them.
            self._PUSH_SOCKET_STATUS['state'] = False
            self._PULL_SOCKET_STATUS['state'] = False

            # PUSH
            self._PUSH_Monitor_Thread = Thread(
                target=self._DWX_ZMQ_EVENT_MONITOR_,
                args=(
                    "PUSH",
                    self._PUSH_SOCKET.get_monitor_socket(),
                ))

            self._PUSH_Monitor_Thread.daemon = True
            self._PUSH_Monitor_Thread.start()

            # PULL
            self._PULL_Monitor_Thread = Thread(
                target=self._DWX_ZMQ_EVENT_MONITOR_,
                args=(
                    "PULL",
                    self._PULL_SOCKET.get_monitor_socket(),
                ))

            self._PULL_Monitor_Thread.daemon = True
            self._PULL_Monitor_Thread.start()
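The monitor threads read lifecycle events from get_monitor_socket(); pyzmq ships a decoder for those frames, so a loop in the spirit of _DWX_ZMQ_EVENT_MONITOR_ (a sketch, not the connector's actual implementation) can be written as:

import zmq
from zmq.utils.monitor import recv_monitor_message

def monitor_events(name, monitor_socket, event_map):
    # block on the monitor socket and print decoded socket lifecycle events
    while monitor_socket.poll():
        evt = recv_monitor_message(monitor_socket)
        print("[%s] %s -> %s" % (name, event_map.get(evt['event'], evt['event']), evt['endpoint']))
        if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
            break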
Example #18
def service(timebase, eyes_are_alive, ipc_pub_url, ipc_sub_url, ipc_push_url,
            user_dir, version):
    """Maps pupil to gaze data, can run various plug-ins.

    Reacts to notifications:
       ``set_detection_mapping_mode``: Sets detection method
       ``start_plugin``: Starts given plugin with the given arguments
       ``eye_process.started``: Sets the detection method eye process
       ``service_process.should_stop``: Stops the service process

    Emits notifications:
        ``eye_process.should_start``
        ``eye_process.should_stop``
        ``set_detection_mapping_mode``
        ``service_process.started``
        ``service_process.stopped``
        ``launcher_process.should_stop``

    Emits data:
        ``gaze``: Gaze data from current gaze mapping plugin.
        ``*``: any other plugin-generated data in the events that is not [dt, pupil, gaze].
    """

    # We defer the imports because of multiprocessing.
    # Otherwise each child process would also load the other processes' imports.
    # This is not harmful but unnecessary.

    # general imports
    from time import sleep
    import logging
    import zmq
    import zmq_tools

    # zmq ipc setup
    zmq_ctx = zmq.Context()
    ipc_pub = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
    gaze_pub = zmq_tools.Msg_Streamer(zmq_ctx, ipc_pub_url)
    pupil_sub = zmq_tools.Msg_Receiver(zmq_ctx,
                                       ipc_sub_url,
                                       topics=('pupil', ))
    notify_sub = zmq_tools.Msg_Receiver(zmq_ctx,
                                        ipc_sub_url,
                                        topics=('notify', ))

    poller = zmq.Poller()
    poller.register(pupil_sub.socket)
    poller.register(notify_sub.socket)

    # log setup
    logging.getLogger("OpenGL").setLevel(logging.ERROR)
    logger = logging.getLogger()
    logger.handlers = []
    logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    def launch_eye_process(eye_id, delay=0):
        n = {
            'subject': 'eye_process.should_start',
            'eye_id': eye_id,
            'delay': delay
        }
        ipc_pub.notify(n)

    def stop_eye_process(eye_id):
        n = {'subject': 'eye_process.should_stop', 'eye_id': eye_id}
        ipc_pub.notify(n)

    try:

        # helpers/utils
        from file_methods import Persistent_Dict
        from methods import delta_t, get_system_info
        from version_utils import VersionFormat
        import audio
        from uvc import get_time_monotonic

        # trigger pupil detector cpp build:
        import pupil_detectors
        del pupil_detectors

        # Plug-ins
        from plugin import Plugin, Plugin_List, import_runtime_plugins
        from calibration_routines import calibration_plugins, gaze_mapping_plugins
        from pupil_remote import Pupil_Remote
        from pupil_groups import Pupil_Groups
        from frame_publisher import Frame_Publisher
        from blink_detection import Blink_Detection
        from service_ui import Service_UI

        logger.info('Application Version: {}'.format(version))
        logger.info('System Info: {}'.format(get_system_info()))

        # g_pool holds variables for this process; they are accessible to all plugins
        g_pool = Global_Container()
        g_pool.app = 'service'
        g_pool.user_dir = user_dir
        g_pool.version = version
        g_pool.get_now = get_time_monotonic
        g_pool.zmq_ctx = zmq_ctx
        g_pool.ipc_pub = ipc_pub
        g_pool.ipc_pub_url = ipc_pub_url
        g_pool.ipc_sub_url = ipc_sub_url
        g_pool.ipc_push_url = ipc_push_url
        g_pool.eyes_are_alive = eyes_are_alive
        g_pool.timebase = timebase

        def get_timestamp():
            return get_time_monotonic() - g_pool.timebase.value

        g_pool.get_timestamp = get_timestamp

        # manage plugins
        runtime_plugins = import_runtime_plugins(
            os.path.join(g_pool.user_dir, 'plugins'))
        user_launchable_plugins = [
            Service_UI, Pupil_Groups, Pupil_Remote, Frame_Publisher,
            Blink_Detection
        ] + runtime_plugins
        plugin_by_index = runtime_plugins + calibration_plugins + gaze_mapping_plugins + user_launchable_plugins
        name_by_index = [p.__name__ for p in plugin_by_index]
        plugin_by_name = dict(zip(name_by_index, plugin_by_index))
        default_plugins = [('Service_UI', {}), ('Dummy_Gaze_Mapper', {}),
                           ('HMD_Calibration', {}), ('Pupil_Remote', {})]
        g_pool.plugin_by_name = plugin_by_name

        tick = delta_t()

        def get_dt():
            return next(tick)

        # load session persistent settings
        session_settings = Persistent_Dict(
            os.path.join(g_pool.user_dir, 'user_settings_service'))
        if session_settings.get("version",
                                VersionFormat('0.0')) < g_pool.version:
            logger.info(
                "Session settings are from an older version of this app. I will not use those."
            )
            session_settings.clear()

        g_pool.min_calibration_confidence = session_settings.get(
            'min_calibration_confidence', 0.8)
        g_pool.detection_mapping_mode = session_settings.get(
            'detection_mapping_mode', '2d')
        g_pool.active_calibration_plugin = None
        g_pool.active_gaze_mapping_plugin = None

        audio.audio_mode = session_settings.get('audio_mode',
                                                audio.default_audio_mode)

        # plugins that are loaded based on user settings from previous session
        g_pool.plugins = Plugin_List(
            g_pool, session_settings.get('loaded_plugins', default_plugins))

        def handle_notifications(n):
            subject = n['subject']
            if subject == 'set_detection_mapping_mode':
                if n['mode'] == '2d':
                    if "Vector_Gaze_Mapper" in g_pool.active_gaze_mapping_plugin.class_name:
                        logger.warning(
                            "The gaze mapper is not supported in 2d mode. Please recalibrate."
                        )
                        g_pool.plugins.add(plugin_by_name['Dummy_Gaze_Mapper'])
                g_pool.detection_mapping_mode = n['mode']
            elif subject == 'start_plugin':
                g_pool.plugins.add(plugin_by_name[n['name']],
                                   args=n.get('args', {}))
            elif subject == 'eye_process.started':
                n = {
                    'subject': 'set_detection_mapping_mode',
                    'mode': g_pool.detection_mapping_mode
                }
                ipc_pub.notify(n)
            elif subject == 'service_process.should_stop':
                g_pool.service_should_run = False
            elif subject.startswith('meta.should_doc'):
                ipc_pub.notify({
                    'subject': 'meta.doc',
                    'actor': g_pool.app,
                    'doc': service.__doc__
                })
                for p in g_pool.plugins:
                    if p.on_notify.__doc__ and p.__class__.on_notify != Plugin.on_notify:
                        ipc_pub.notify({
                            'subject': 'meta.doc',
                            'actor': p.class_name,
                            'doc': p.on_notify.__doc__
                        })

        if session_settings.get('eye1_process_alive', False):
            launch_eye_process(1, delay=0.3)
        if session_settings.get('eye0_process_alive', True):
            launch_eye_process(0, delay=0.0)

        ipc_pub.notify({'subject': 'service_process.started'})
        logger.warning('Process started.')
        g_pool.service_should_run = True

        # initiate ui update loop
        ipc_pub.notify({
            'subject': 'service_process.ui.should_update',
            'initial_delay': 1 / 40
        })

        # Event loop
        while g_pool.service_should_run:
            socks = dict(poller.poll())
            if pupil_sub.socket in socks:
                t, p = pupil_sub.recv()
                new_gaze_data = g_pool.active_gaze_mapping_plugin.on_pupil_datum(
                    p)
                for g in new_gaze_data:
                    gaze_pub.send('gaze', g)

                events = {}
                events['gaze_positions'] = new_gaze_data
                events['pupil_positions'] = [p]
                for plugin in g_pool.plugins:
                    plugin.recent_events(events=events)

            if notify_sub.socket in socks:
                t, n = notify_sub.recv()
                handle_notifications(n)
                for plugin in g_pool.plugins:
                    plugin.on_notify(n)

            # check if a plugin needs to be destroyed
            g_pool.plugins.clean()

        session_settings['loaded_plugins'] = g_pool.plugins.get_initializers()
        session_settings['version'] = str(g_pool.version)
        session_settings['eye0_process_alive'] = eyes_are_alive[0].value
        session_settings['eye1_process_alive'] = eyes_are_alive[1].value
        session_settings[
            'min_calibration_confidence'] = g_pool.min_calibration_confidence
        session_settings[
            'detection_mapping_mode'] = g_pool.detection_mapping_mode
        session_settings['audio_mode'] = audio.audio_mode
        session_settings.close()

        # de-init all running plugins
        for p in g_pool.plugins:
            p.alive = False
        g_pool.plugins.clean()

    except Exception:
        import traceback
        trace = traceback.format_exc()
        logger.error('Process Service crashed with trace:\n{}'.format(trace))

    finally:
        # shut down eye processes:
        stop_eye_process(0)
        stop_eye_process(1)

        logger.info("Process shutting down.")
        ipc_pub.notify({'subject': 'service_process.stopped'})

        # shut down launcher
        n = {'subject': 'launcher_process.should_stop'}
        ipc_pub.notify(n)
        sleep(1.0)
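A minimal sketch of how another process could drive this service over the same IPC backbone, reusing the zmq_tools.Msg_Dispatcher helper from the example above (the push URL is a hypothetical placeholder; the real one is handed out by the launcher process):

import zmq
import zmq_tools

ipc_push_url = 'tcp://127.0.0.1:50020'  # assumption: real URL comes from the launcher

zmq_ctx = zmq.Context()
ipc_pub = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)

# Ask the service to start a plugin, then shut down cleanly.
ipc_pub.notify({'subject': 'start_plugin', 'name': 'Frame_Publisher'})
ipc_pub.notify({'subject': 'service_process.should_stop'})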
Example #19
def ros2zmq():
    global g_socket, g_lock

    with g_lock:
        context = zmq.Context()
        g_socket = context.socket(zmq.PUSH)
        g_socket.setsockopt(zmq.LINGER, 100)  # milliseconds
        g_socket.bind('tcp://*:5555')

    context2 = zmq.Context()
    g_socket2 = context2.socket(zmq.PULL)
    g_socket2.RCVTIMEO = 5000  # in milliseconds
    g_socket2.bind('tcp://*:5556')
  
    rospy.init_node('listener', anonymous=True)
    rospy.Subscriber('/scout_1/joint_states', JointState, callback_topic, '/scout_1/joint_states')
    rospy.Subscriber('/scout_1/laser/scan', LaserScan, callback_lidar)
    rospy.Subscriber('/scout_1/imu', Imu, callback_imu)

    lights_on = rospy.ServiceProxy('/scout_1/toggle_light', ToggleLightSrv)
    lights_on('high')

    # TODO load it from configuration
    # task 1
    rospy.Subscriber('/qual_1_score', Qual1ScoringMsg, callback_topic, '/qual_1_score')
    rospy.Subscriber('/scout_1/volatile_sensor', VolSensorMsg, callback_topic, '/scout_1/volatile_sensor')

    rospy.Subscriber('/scout_1/camera/left/image_raw/compressed', CompressedImage, callback_topic,
                     '/scout_1/camera/left/image_raw/compressed')
    rospy.Subscriber('/scout_1/camera/right/image_raw/compressed', CompressedImage, callback_topic, 
                     '/scout_1/camera/right/image_raw/compressed')

    steering_msg = Float64()
    steering_msg.data = 0
    effort_msg = Float64()
    effort_msg.data = 0

    QSIZE = 10
    vel_fl_publisher = rospy.Publisher('/scout_1/fl_wheel_controller/command', Float64, queue_size=QSIZE)
    vel_fr_publisher = rospy.Publisher('/scout_1/fr_wheel_controller/command', Float64, queue_size=QSIZE)
    vel_bl_publisher = rospy.Publisher('/scout_1/bl_wheel_controller/command', Float64, queue_size=QSIZE)
    vel_br_publisher = rospy.Publisher('/scout_1/br_wheel_controller/command', Float64, queue_size=QSIZE)

    steering_fl_publisher = rospy.Publisher('/scout_1/fl_steering_arm_controller/command', Float64, queue_size=QSIZE)
    steering_fr_publisher = rospy.Publisher('/scout_1/fr_steering_arm_controller/command', Float64, queue_size=QSIZE)
    steering_bl_publisher = rospy.Publisher('/scout_1/bl_steering_arm_controller/command', Float64, queue_size=QSIZE)
    steering_br_publisher = rospy.Publisher('/scout_1/br_steering_arm_controller/command', Float64, queue_size=QSIZE)

    r = rospy.Rate(100)
    osgar_debug('starting ...')
    while True:
        try:
            message = ""
            # Drain the queue, keeping only the most recent command.
            try:
                while True:
                    message = g_socket2.recv(zmq.NOBLOCK)
            except zmq.error.Again:
                pass

            message_type = message.split(" ")[0]
            if message_type == "cmd_rover":
                arr = [float(x) for x in message.split()[1:]]
                for pub, angle in zip(
                        [steering_fl_publisher, steering_fr_publisher, steering_bl_publisher, steering_br_publisher],
                        arr[:4]):
                    steering_msg.data = angle
                    pub.publish(steering_msg)

                for pub, effort in zip(
                        [vel_fl_publisher, vel_fr_publisher, vel_bl_publisher, vel_br_publisher],
                        arr[4:]):
                    effort_msg.data = effort
                    pub.publish(effort_msg)

            elif message_type == "request_origin":
                osgar_debug('calling request_origin')
                print("Requesting true pose")
                try:
                    rospy.wait_for_service("/scout_1/get_true_pose", timeout=2.0)
                    request_origin = rospy.ServiceProxy('/scout_1/get_true_pose', LocalizationSrv)
                    p = request_origin(True)
                    s = "origin scout_1 %f %f %f  %f %f %f %f" % (p.pose.position.x, p.pose.position.y, p.pose.position.z, 
                                                                  p.pose.orientation.x, p.pose.orientation.y, p.pose.orientation.z, p.pose.orientation.w)
                    print(s)
                    socket_send(s)
                except rospy.service.ServiceException as e:
                    print(e)
                    osgar_debug('rospy exception')

            elif message_type == "artf":
                osgar_debug('calling artf')
                s = message.split()[1:]  # ignore "artf" prefix
                x, y, z = [float(a) for a in s[1:]]
                pose = geometry_msgs.msg.Point(x, y, z)
                vol_type = s[0]
                print("Reporting artifact %s at position %f %f %f" % (vol_type, x, y, z))
                try:
                    rospy.wait_for_service("/vol_detected_service", timeout=2.0)
                    report_artf = rospy.ServiceProxy('/vol_detected_service', Qual1ScoreSrv)
                    resp = report_artf(pose=pose, vol_type=vol_type)
                    print("Volatile report result: %r" % resp.result)
                    osgar_debug('volatile result:' + str(resp.result))
                except rospy.ServiceException as exc:
                    print("/vol_detected_service exception: " + str(exc))
                    osgar_debug('volatile exception')

            else:
                if len(message_type) > 0: 
                    print("Unhandled message type: %s" % message_type)

        except zmq.error.Again:
            pass
        r.sleep()
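A minimal companion client for the bridge above, sketched under the assumption that it runs on the same host: the bridge PUSHes sensor data on port 5555 and PULLs commands on port 5556, and a cmd_rover message carries four steering angles followed by four wheel efforts (matching arr[:4] and arr[4:] above).

import zmq

context = zmq.Context()

# Receive sensor data streamed by the bridge.
data_in = context.socket(zmq.PULL)
data_in.connect('tcp://localhost:5555')

# Send commands back to the bridge.
cmd_out = context.socket(zmq.PUSH)
cmd_out.connect('tcp://localhost:5556')

# Drive straight: zero steering on all four wheels, equal effort on all four.
cmd_out.send_string('cmd_rover 0 0 0 0  40 40 40 40')
print(data_in.recv())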
Example #20
def get_context():
    global context
    if context.closed:
        context = zmq.Context()
        context.linger = ZMQ_SOCKET_LINGER
    return context
Example #21
def _monitor_client(self):
    monitor_client_inst = monitor_client.MonitorClient(
        zmq.Context(), "tcp://localhost:5000")
    self.assertEqual(monitor_client_inst.dump_all_counter_data(),
                     monitor_cache)
Example #22
def reset_context():
    global context
    context.term()
    context = zmq.Context()
    context.linger = ZMQ_SOCKET_LINGER
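Examples #20 and #22 assume a shared module-level context; a minimal sketch of that module setup, with ZMQ_SOCKET_LINGER as an assumed constant:

import zmq

ZMQ_SOCKET_LINGER = 100  # assumed value, in milliseconds

context = zmq.Context()
context.linger = ZMQ_SOCKET_LINGER  # new sockets inherit this linger

# Sockets are then always obtained via the accessor, so code keeps working
# after reset_context() has terminated and replaced the old context:
sock = get_context().socket(zmq.PUSH)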
Example #23
import zmq

ctx = zmq.Context()
xpub = ctx.socket(zmq.XPUB)
xpub.bind('tcp://127.0.0.1:24000')
xsub = ctx.socket(zmq.XSUB)
xsub.bind('tcp://127.0.0.1:23000')
zmq.proxy(xpub, xsub)
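With this broker running, publishers connect to the XSUB side (port 23000) and subscribers to the XPUB side (port 24000); a minimal sketch of both ends (ports taken from the example, the topic string is an assumption):

import time
import zmq

ctx = zmq.Context()

pub = ctx.socket(zmq.PUB)
pub.connect('tcp://127.0.0.1:23000')  # publishers feed the XSUB side

sub = ctx.socket(zmq.SUB)
sub.connect('tcp://127.0.0.1:24000')  # subscribers read from the XPUB side
sub.setsockopt_string(zmq.SUBSCRIBE, 'topic')

time.sleep(0.5)  # allow subscriptions to propagate (slow-joiner problem)
pub.send_string('topic hello')
print(sub.recv_string())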
Example #24
def pub_zmq_config(port):
    context = zmq.Context()
    sock = context.socket(zmq.PUB)
    sock.bind("tcp://127.0.0.1:%s" % port)
    time.sleep(0.5)
    return sock
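The time.sleep(0.5) gives late subscribers time to finish connecting before the first publish (ZeroMQ's "slow joiner" problem). A matching subscriber helper, sketched in the same style (the function name is an assumption):

import zmq

def sub_zmq_config(port):
    context = zmq.Context()
    sock = context.socket(zmq.SUB)
    sock.connect("tcp://127.0.0.1:%s" % port)
    sock.setsockopt_string(zmq.SUBSCRIBE, "")  # subscribe to everything
    return sock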
Example #25
def __init__(self, uri):
    xmlrpclib.Transport.__init__(self)
    self.context = zmq.Context()
    self.connection = self.context.socket(zmq.REQ)
    self.connection.connect(uri)
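This __init__ belongs to an xmlrpclib.Transport subclass that tunnels XML-RPC over a ZeroMQ REQ socket. The fragment only opens the connection; a hypothetical sketch of the matching request() override (the marshalling details are assumptions, not taken from the source):

def request(self, host, handler, request_body, verbose=False):
    # Assumed completion: ship the serialized XML-RPC call over REQ
    # and parse the XML reply; not part of the original fragment.
    self.connection.send(request_body)
    response = self.connection.recv()
    return xmlrpclib.loads(response)[0]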
Example #26
    def __init__(self, opts, publish_handler, io_loop=None):
        self.opts = salt.config.DEFAULT_MINION_OPTS.copy()
        default_minion_sock_dir = self.opts['sock_dir']
        self.opts.update(opts)

        self.publish_handler = publish_handler

        self.io_loop = io_loop or zmq.eventloop.ioloop.ZMQIOLoop()
        self.context = zmq.Context()

        hash_type = getattr(hashlib, self.opts['hash_type'])
        # Only use the first 10 chars to keep longer hashes from exceeding the
        # max socket path length.
        id_hash = hash_type(salt.utils.to_bytes(
            self.opts['id'])).hexdigest()[:10]
        epub_sock_path = os.path.join(
            self.opts['sock_dir'], 'minion_event_{0}_pub.ipc'.format(id_hash))
        if os.path.exists(epub_sock_path):
            os.unlink(epub_sock_path)
        epull_sock_path = os.path.join(
            self.opts['sock_dir'], 'minion_event_{0}_pull.ipc'.format(id_hash))
        if os.path.exists(epull_sock_path):
            os.unlink(epull_sock_path)

        self.epub_sock = self.context.socket(zmq.PUB)

        if self.opts['ipc_mode'] == 'tcp':
            epub_uri = 'tcp://127.0.0.1:{0}'.format(self.opts['tcp_pub_port'])
            epull_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts['tcp_pull_port'])
        else:
            epub_uri = 'ipc://{0}'.format(epub_sock_path)
            salt.utils.zeromq.check_ipc_path_max_len(epub_uri)
            epull_uri = 'ipc://{0}'.format(epull_sock_path)
            salt.utils.zeromq.check_ipc_path_max_len(epull_uri)

        log.debug('{0} PUB socket URI: {1}'.format(self.__class__.__name__,
                                                   epub_uri))
        log.debug('{0} PULL socket URI: {1}'.format(self.__class__.__name__,
                                                    epull_uri))

        minion_sock_dir = self.opts['sock_dir']

        if not os.path.isdir(minion_sock_dir):
            # Let's try to create the directory defined on the configuration
            # file
            try:
                os.makedirs(minion_sock_dir, 0o755)
            except OSError as exc:
                log.error('Could not create SOCK_DIR: {0}'.format(exc))
                # Let's not fail yet and try using the default path
                if minion_sock_dir == default_minion_sock_dir:
                    # We're already trying the default system path, stop now!
                    raise

                if not os.path.isdir(default_minion_sock_dir):
                    try:
                        os.makedirs(default_minion_sock_dir, 0o755)
                    except OSError as exc:
                        log.error('Could not create SOCK_DIR: {0}'.format(exc))
                        # Let's stop at this stage
                        raise

        # Create the pull socket
        self.epull_sock = self.context.socket(zmq.PULL)

        # Securely bind the event sockets
        if self.opts['ipc_mode'] != 'tcp':
            old_umask = os.umask(0o177)
        try:
            log.info('Starting pub socket on {0}'.format(epub_uri))
            self.epub_sock.bind(epub_uri)
            log.info('Starting pull socket on {0}'.format(epull_uri))
            self.epull_sock.bind(epull_uri)
        finally:
            if self.opts['ipc_mode'] != 'tcp':
                os.umask(old_umask)

        self.stream = zmq.eventloop.zmqstream.ZMQStream(self.epull_sock,
                                                        io_loop=self.io_loop)
        self.stream.on_recv(self.handle_publish)
Example #27
def main():
    __logger.info('Initialising Database Connection')
    __db = eddn.database(__config['database']['url'], __logger)

    __context = zmq.Context()
    __subscriber = __context.socket(zmq.SUB)

    __subscriber.setsockopt(zmq.SUBSCRIBE, b"")
    __subscriber.setsockopt(zmq.RCVTIMEO, __timeoutEDDN)
    __subscriber.setsockopt(zmq.RCVHWM, __zmqReceiveHWMLimit)

    __logger.info('Starting EDDN Subscriber')
    ############################################################################
    while True:
        ##########################################################################
        try:
            __subscriber.connect(__relayEDDN)
            __logger.info('Connect to ' + __relayEDDN)

            ########################################################################
            while True:
                __message = __subscriber.recv()

                __logger.debug('Got a message')

                # recv() signals a timeout by raising zmq.error.Again (a
                # ZMQError subclass handled below) rather than returning
                # False, and zlib.decompress raises on corrupt input.
                try:
                    __message = zlib.decompress(__message)
                except zlib.error:
                    __logger.warning('Failed to decompress message')
                    continue

                ###############################################################
                # Validate message against relevant schema and blacklist
                ###############################################################
                (__eddn_message, __message_valid, __message_blacklisted,
                 __message_schema_is_test) = validateEDDNMessage(
                     __message, __config, __logger)
                if not __eddn_message:
                    continue

                ###############################################################
                # Insert data into database
                ###############################################################
                __db.insertMessage(__eddn_message.json,
                                   __eddn_message.schemaref,
                                   __eddn_message.gatewaytimestamp,
                                   __message_blacklisted, __message_valid,
                                   __message_schema_is_test)
                #exit(0)
                ###############################################################
            ########################################################################

        except zmq.ZMQError as e:
            __logger.error('ZMQSocketException: ' + str(e))
            __subscriber.disconnect(__relayEDDN)
            __logger.warning('Disconnect from ' + __relayEDDN)
            time.sleep(5)
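The names __relayEDDN, __timeoutEDDN and __zmqReceiveHWMLimit are defined elsewhere in the source module; a sketch of plausible values (illustrative assumptions, not taken from the source). With RCVTIMEO set, an idle recv() raises zmq.error.Again, which the except zmq.ZMQError clause catches, producing the disconnect/sleep/reconnect cycle above.

__relayEDDN = 'tcp://eddn.edcd.io:9500'  # public EDDN relay endpoint (assumption)
__timeoutEDDN = 600000                   # RCVTIMEO in ms: give up after 10 min idle
__zmqReceiveHWMLimit = 100               # receive high-water mark, in messages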
Example #28
    def run(self):
        '''
        Bind the pub and pull sockets for events
        '''
        salt.utils.appendproctitle(self.__class__.__name__)
        linger = 5000
        # Set up the context
        self.context = zmq.Context(1)
        # Prepare the master event publisher
        self.epub_sock = self.context.socket(zmq.PUB)
        try:
            self.epub_sock.setsockopt(zmq.HWM,
                                      self.opts['event_publisher_pub_hwm'])
        except AttributeError:
            self.epub_sock.setsockopt(zmq.SNDHWM,
                                      self.opts['event_publisher_pub_hwm'])
            self.epub_sock.setsockopt(zmq.RCVHWM,
                                      self.opts['event_publisher_pub_hwm'])
        # Prepare master event pull socket
        self.epull_sock = self.context.socket(zmq.PULL)
        if self.opts['ipc_mode'] == 'tcp':
            epub_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts['tcp_master_pub_port'])
            epull_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts['tcp_master_pull_port'])
        else:
            epub_uri = 'ipc://{0}'.format(
                os.path.join(self.opts['sock_dir'], 'master_event_pub.ipc'))
            salt.utils.zeromq.check_ipc_path_max_len(epub_uri)
            epull_uri = 'ipc://{0}'.format(
                os.path.join(self.opts['sock_dir'], 'master_event_pull.ipc'))
            salt.utils.zeromq.check_ipc_path_max_len(epull_uri)

        # Start the master event publisher
        old_umask = os.umask(0o177)
        try:
            self.epull_sock.bind(epull_uri)
            self.epub_sock.bind(epub_uri)
            if self.opts['client_acl'] or self.opts['client_acl_blacklist']:
                salt.utils.warn_until(
                    'Nitrogen',
                    'ACL rules should be configured with \'publisher_acl\' and '
                    '\'publisher_acl_blacklist\' not \'client_acl\' and '
                    '\'client_acl_blacklist\'. This functionality will be removed in Salt '
                    'Nitrogen.')
            if (self.opts['ipc_mode'] != 'tcp'
                    and (self.opts['publisher_acl'] or self.opts['client_acl']
                         or self.opts['external_auth'])):
                os.chmod(
                    os.path.join(self.opts['sock_dir'],
                                 'master_event_pub.ipc'), 0o666)
        finally:
            os.umask(old_umask)
        try:
            while True:
                # Catch and handle EINTR from when this process is sent
                # SIGUSR1 gracefully so we don't choke and die horribly
                try:
                    package = self.epull_sock.recv()
                    self.epub_sock.send(package)
                except zmq.ZMQError as exc:
                    if exc.errno == errno.EINTR:
                        continue
                    raise exc
        except KeyboardInterrupt:
            if self.epub_sock.closed is False:
                self.epub_sock.setsockopt(zmq.LINGER, linger)
                self.epub_sock.close()
            if self.epull_sock.closed is False:
                self.epull_sock.setsockopt(zmq.LINGER, linger)
                self.epull_sock.close()
            if self.context.closed is False:
                self.context.term()
Example #29
#!/usr/bin/env python

import argparse
import mido
import struct
import time
import zmq

DEVICE_POLLING_INTERVAL_SEC = 3

# TODO use env vars
HOST = "127.0.0.1"
INPUT_ZMQ_URL = f"tcp://{HOST}:20300"

context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind(INPUT_ZMQ_URL)


def timed_sender():
    msg = yield  # prime with next(); the first send() delivers a message here
    t1 = time.perf_counter_ns()  # clock starts at the first message
    while True:
        t2 = time.perf_counter_ns()
        arr = bytearray()
        arr += struct.pack("<Q", t2 - t1)  # little-endian uint64: ns since start
        arr += msg.bin()                   # raw MIDI message bytes
        socket.send(arr)
        msg = yield t2 - t1                # hand the elapsed time back to the caller
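timed_sender is a coroutine-style generator: prime it once with next(), then each send(msg) packs the elapsed nanoseconds as a little-endian uint64, appends the raw MIDI bytes from msg.bin(), publishes the frame, and yields the elapsed time back. A minimal usage sketch (the input port is left to mido's default):

sender = timed_sender()
next(sender)  # prime the generator; the clock starts on the first message

with mido.open_input() as port:
    for msg in port:
        elapsed_ns = sender.send(msg)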
Example #30
def main():
    # Parse command line args.
    parser = argparse.ArgumentParser(description='Calico ACL Manager')
    parser.add_argument('-c', '--config-file', dest='config_file')
    args = parser.parse_args()

    log_defaults = {
        'LogFilePath': None,
        'LogSeverityFile': 'INFO',
        'LogSeveritySys': 'ERROR',
        'LogSeverityScreen': 'ERROR',
        'LocalAddress': '*'
    }

    # Read config file.
    config = ConfigParser.ConfigParser(log_defaults)
    config.read(args.config_file or 'acl_manager.cfg')

    plugin_address = config.get('global', 'PluginAddress')
    local_address = config.get('global', 'LocalAddress')
    log_file_path = config.get('log', 'LogFilePath')
    log_file_level = config.get('log', 'LogSeverityFile')
    log_syslog_level = config.get('log', 'LogSeveritySys')
    log_stream_level = config.get('log', 'LogSeverityScreen')

    # Convert log level names into python log levels.
    loglevels = {
        "none": None,
        "debug": logging.DEBUG,
        "info": logging.INFO,
        "warn": logging.WARNING,
        "warning": logging.WARNING,
        "err": logging.ERROR,
        "error": logging.ERROR,
        "crit": logging.CRITICAL,
        "critical": logging.CRITICAL
    }

    file_level = loglevels[log_file_level.lower()]
    syslog_level = loglevels[log_syslog_level.lower()]
    stream_level = loglevels[log_stream_level.lower()]

    # Configure logging.
    common.default_logging()
    common.complete_logging(logfile=log_file_path,
                            file_level=file_level,
                            syslog_level=syslog_level,
                            stream_level=stream_level)

    # Create ZeroMQ context.
    context = zmq.Context()
    log.info("pyzmq version is %s" % zmq.pyzmq_version())

    # Create and start components.
    acl_store = ACLStore()
    network_store = NetworkStore()

    publisher = ACLPublisher(context, acl_store, local_address)
    acl_store.start(publisher)

    processor = RuleProcessor(acl_store, network_store)
    network_store.add_processor(processor)

    subscriber = NetworkSubscriber(context, network_store, plugin_address)