Example #1
def setup(hass, config):
    """Set up Zeroconf and make Home Assistant discoverable."""
    from zeroconf import Zeroconf, ServiceInfo

    zeroconf = Zeroconf()

    zeroconf_name = '{}.{}'.format(hass.config.location_name, ZEROCONF_TYPE)

    requires_api_password = hass.config.api.api_password is not None
    params = {
        'version': __version__,
        'base_url': hass.config.api.base_url,
        'requires_api_password': requires_api_password,
    }

    info = ServiceInfo(ZEROCONF_TYPE, zeroconf_name,
                       socket.inet_aton(hass.config.api.host),
                       hass.config.api.port, 0, 0, params)

    zeroconf.register_service(info)

    def stop_zeroconf(event):
        """Stop Zeroconf."""
        zeroconf.unregister_service(info)
        zeroconf.close()

    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_zeroconf)

    return True
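
This snippet is only the component's setup function and relies on module-level names defined elsewhere in the file. A minimal sketch of that assumed context is shown below; the service type value is illustrative rather than copied from the source.

import socket

from homeassistant.const import EVENT_HOMEASSISTANT_STOP, __version__

# Assumed constant; the real component defines its own service type.
ZEROCONF_TYPE = '_home-assistant._tcp.local.'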
Example #2
def main():
    desc = '%s [Args] [Options]\nDetailed options -h or --help' % __file__
    parser = ArgumentParser(description=desc)
    add_mqtt_arguments(parser, topic_default=DEFAULT_TOPIC_BASE)

    args = parser.parse_args()

    global topic_base
    topic_base = args.topic

    logging.basicConfig(level=get_log_level(args), format=LOG_FORMAT)

    zeroconf = Zeroconf()
    mqtt_client = mqtt.Client()
    listener = HostListener(mqtt_client)
    mqtt_client.on_connect = listener.on_connect
    mqtt_client.on_message = listener.on_message
    connect_mqtt(args, mqtt_client)
    browser = ServiceBrowser(zeroconf, SERVICE_TYPE, listener)
    try:
        mqtt_client.loop_forever()
    except KeyboardInterrupt:
        pass
    finally:
        zeroconf.close()
Example #3
class Scanner(threading.Thread):
    def __init__(self):
        super(Scanner, self).__init__()
        self.abort   = False


    def run(self):
        utils.Log('Starting Zeroconf Scan')

        self.listener = MyListener()
        self.zeroconf = Zeroconf()
        self.browser  = ServiceBrowser(self.zeroconf, "_http._tcp.local.", self.listener)

        while not self.abort:
            xbmc.sleep(100)

        self.zeroconf.close() 

        utils.Log('Ending Zeroconf Scan')

        exit()


    def stop(self):
        self.abort = True


    def getServers(self):
        return getServers()
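
A brief usage sketch for the thread above, assuming it runs inside the Kodi add-on that provides utils, xbmc and MyListener:

scanner = Scanner()
scanner.start()    # run() starts browsing for _http._tcp.local. services
# ... later, when the add-on shuts down:
scanner.stop()     # sets the abort flag; run() then closes Zeroconf and exits
scanner.join()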
Example #4
def setup(hass, config):
    """Set up Zeroconf and make Home Assistant discoverable."""
    from zeroconf import Zeroconf, ServiceInfo

    zeroconf = Zeroconf()

    zeroconf_name = '{}.{}'.format(hass.config.location_name, ZEROCONF_TYPE)

    requires_api_password = hass.config.api.api_password is not None
    params = {
        'version': __version__,
        'base_url': hass.config.api.base_url,
        'requires_api_password': requires_api_password,
    }

    host_ip = util.get_local_ip()

    try:
        host_ip_pton = socket.inet_pton(socket.AF_INET, host_ip)
    except socket.error:
        host_ip_pton = socket.inet_pton(socket.AF_INET6, host_ip)

    info = ServiceInfo(ZEROCONF_TYPE, zeroconf_name, host_ip_pton,
                       hass.http.server_port, 0, 0, params)

    zeroconf.register_service(info)

    def stop_zeroconf(event):
        """Stop Zeroconf."""
        zeroconf.unregister_service(info)
        zeroconf.close()

    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_zeroconf)

    return True
Example #5
def setup(hass, config):
    """Set up Zeroconf and make Home Assistant discoverable."""
    from zeroconf import Zeroconf, ServiceInfo

    zeroconf = Zeroconf()

    zeroconf_name = "{}.{}".format(hass.config.location_name,
                                   ZEROCONF_TYPE)

    requires_api_password = (hass.config.api.api_password is not None)
    params = {"version": __version__, "base_url": hass.config.api.base_url,
              "requires_api_password": requires_api_password}

    info = ServiceInfo(ZEROCONF_TYPE, zeroconf_name,
                       socket.inet_aton(hass.config.api.host),
                       hass.config.api.port, 0, 0, params)

    zeroconf.register_service(info)

    def stop_zeroconf(event):
        """Stop Zeroconf."""
        zeroconf.unregister_service(info)
        zeroconf.close()

    hass.bus.listen_once(EVENT_BLUMATE_STOP, stop_zeroconf)

    return True
Example #6
    def discover(self):
        print("{status} Smart Module hosting asset {asset_id} {asset_type} {asset_context}.".format(
            status="Mock" if self.rtc.mock else "Real",
            asset_id=self.asset.id,
            asset_type=self.asset.type,
            asset_context=self.asset.context))

        try:
            max_sleep_time = 3 # Calling sleep should be reviewed.
            zeroconf = Zeroconf()
            Log.info("Performing Broker discovery...")
            self.find_broker(zeroconf)
            time.sleep(max_sleep_time) # Wait for max_sleep_time to see if we found it.
            if self.comm.broker_name or self.comm.broker_ip: # Found it.
                Log.info("MQTT Broker: {broker_name} IP: {broker_ip}.".format(
                    broker_name=self.comm.broker_name,
                    broker_ip=self.comm.broker_ip))
            else: # Make necessary actions to become the broker.
                Log.info("Broker not found. Becoming the broker.")
                self.become_broker()
            time.sleep(max_sleep_time)
            self.comm.connect() # Now it's time to connect to the broker.
        except Exception as excpt:
            Log.exception("[Exiting] Trying to find or become the broker.")
        finally:
            Log.info("Closing Zeroconf connection.")
            zeroconf.close()

        t_end = time.time() + 10
        while (time.time() < t_end) and not self.comm.is_connected:
            time.sleep(1)

        self.comm.subscribe("SCHEDULER/RESPONSE")
        self.comm.send("SCHEDULER/QUERY", "Where are you?")
        Log.info("Waiting for Scheduler response...")
        time.sleep(5) # Just wait for reply... Need a review?

        self.comm.send("ANNOUNCE", self.hostname + " is online.")

        t_end = time.time() + 2
        while (time.time() < t_end) and not self.comm.is_connected:
            time.sleep(1)

        if not self.comm.scheduler_found: # Become the Scheduler (necessary actions as Scheduler)
            try:
                Log.info("No Scheduler found. Becoming the Scheduler.")
                self.scheduler = Scheduler()
                self.scheduler.smart_module = self
                self.scheduler.prepare_jobs(self.scheduler.load_schedule())
                self.comm.scheduler_found = True
                self.comm.subscribe("SCHEDULER/QUERY")
                self.comm.unsubscribe("SCHEDULER/RESPONSE")
                self.comm.subscribe("STATUS/RESPONSE" + "/#")
                self.comm.subscribe("ASSET/RESPONSE" + "/#")
                self.comm.subscribe("ALERT" + "/#")
                self.comm.send("SCHEDULER/RESPONSE", self.hostname)
                self.comm.send("ANNOUNCE", self.hostname + " is running the Scheduler.")
                Log.info("Scheduler program loaded.")
            except Exception as excpt:
                Log.exception("Error initializing scheduler. %s.", excpt)
Example #7
    def test_integration_with_subtype_and_listener(self):
        subtype_ = "_subtype._sub"
        type_ = "_type._tcp.local."
        name = "xxxyyy"
        # Note: discovery returns only DNS-SD type not subtype
        discovery_type = "%s.%s" % (subtype_, type_)
        registration_name = "%s.%s" % (name, type_)

        zeroconf_registrar = Zeroconf(interfaces=['127.0.0.1'])
        desc = {'path': '/~paulsm/'}
        info = ServiceInfo(
            discovery_type, registration_name,
            socket.inet_aton("10.0.1.2"), 80, 0, 0,
            desc, "ash-2.local.")
        zeroconf_registrar.register_service(info)

        try:
            service_types = ZeroconfServiceTypes.find(
                interfaces=['127.0.0.1'], timeout=0.5)
            assert discovery_type in service_types
            service_types = ZeroconfServiceTypes.find(
                zc=zeroconf_registrar, timeout=0.5)
            assert discovery_type in service_types

        finally:
            zeroconf_registrar.close()
Example #8
def test_integration():
    service_added = Event()
    service_removed = Event()

    type_ = "_http._tcp.local."
    registration_name = "xxxyyy.%s" % type_

    def on_service_state_change(zeroconf, service_type, state_change, name):
        if name == registration_name:
            if state_change is ServiceStateChange.Added:
                service_added.set()
            elif state_change is ServiceStateChange.Removed:
                service_removed.set()

    zeroconf_browser = Zeroconf()
    browser = ServiceBrowser(zeroconf_browser, type_, [on_service_state_change])

    zeroconf_registrar = Zeroconf()
    desc = {'path': '/~paulsm/'}
    info = ServiceInfo(
        type_, registration_name,
        socket.inet_aton("10.0.1.2"), 80, 0, 0,
        desc, "ash-2.local.")
    zeroconf_registrar.register_service(info)

    try:
        service_added.wait(1)
        assert service_added.is_set()
        zeroconf_registrar.unregister_service(info)
        service_removed.wait(1)
        assert service_removed.is_set()
    finally:
        zeroconf_registrar.close()
        browser.cancel()
        zeroconf_browser.close()
Example #9
class MDnsListener(object):
  """A MDNS Listener."""

  def __init__(self):
    self.logger = _log.GetLogger('LogoCert')
    self.zeroconf = Zeroconf(InterfaceChoice.All)
    self.listener = MDnsService()

  def add_listener(self, proto):
    """Browse for announcements of a particular protocol.

    Args:
      proto: string, type of traffic to listen for.
    Returns:
      boolean, True = browser activated, False = errors detected.
    """
    protocols = {'http': '_http._tcp.local.',
                 'ipp': '_ipp._tcp.local.',
                 'mdns': '_mdns._udp.local.',
                 'printer': '_printer._tcp.local.',
                 'privet': '_privet._tcp.local.',
                }

    if proto not in protocols:
      self.logger.error('Error starting listener, %s protocol unknown', proto)
      return False

    ServiceBrowser(self.zeroconf, protocols[proto], self.listener)
    self.logger.info('Browsing for %s services...', proto)
    return True

  def remove_listeners(self):
    """Remove all listeners."""
    self.zeroconf.close()
    self.logger.info('All listeners have been stopped.')
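
A hedged usage sketch for the listener above; MDnsService is assumed to collect the discovered services as in the surrounding LogoCert code:

import time

mdns = MDnsListener()
if mdns.add_listener('privet'):
    time.sleep(5)  # give the ServiceBrowser a moment to collect announcements
mdns.remove_listeners()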
Example #10
class PublishService(Service):
    """Service implementing zeroconf publishing service for chains master servers"""

    def onInit(self):
        log.info('Zeroconf publish init')
        self.ip_addr = self._get_ip()
        self.hostname = socket.gethostname()
        self.services = []
        self.desc = {'Description': 'Chains Home Automation service on rabbitmq'}
        self.amqp_info = ServiceInfo("_amqp._tcp.local.",
                                     "Chains Master AMQP %s._amqp._tcp.local." % self.hostname,
                                     socket.inet_aton(self.ip_addr), 5672, 0, 0,
                                     self.desc, "%s.local." % self.hostname)
        self.services.append(self.amqp_info)
        self.zeroconf = Zeroconf()

    def onStart(self):
        log.info('Starting zeroconf publishing service')
        for info in self.services:
            self.zeroconf.register_service(info)

    def _get_ip(self):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect(('google.com', 80))
        ip = sock.getsockname()[0]
        sock.close()
        return ip
Example #11
    def find(cls, timeout=10, fast=False):
        """Use Zeroconf/Bonjour to locate AirPlay servers on the local network

        Args:
            timeout(int):   The number of seconds to wait for responses.
                            If fast is false, then this function will always block for this number of seconds.
            fast(bool):     If true, do not wait for timeout to expire,
                            return as soon as we've found at least one AirPlay server

        Returns:
            list:   A list of AirPlay() objects; one for each AirPlay server found

        """

        # this will be our list of devices
        devices = []

        # zeroconf will call this method when a device is found
        def on_service_state_change(zeroconf, service_type, name, state_change):
            if state_change is ServiceStateChange.Added:
                info = zeroconf.get_service_info(service_type, name)
                if info is None:
                    return

                try:
                    name, _ = name.split('.', 1)
                except ValueError:
                    pass

                devices.append(
                    cls(socket.inet_ntoa(info.address), info.port, name)
                )

        # search for AirPlay devices
        try:
            zeroconf = Zeroconf()
            browser = ServiceBrowser(zeroconf, "_airplay._tcp.local.", handlers=[on_service_state_change])  # NOQA
        except NameError:
            warnings.warn(
                'AirPlay.find() requires the zeroconf package but it could not be imported. '
                'Install it if you wish to use this method. https://pypi.python.org/pypi/zeroconf',
                stacklevel=2
            )
            return None

        # enforce the timeout
        timeout = time.time() + timeout
        try:
            while time.time() < timeout:
                # if they asked us to be quick, bounce as soon as we have one AirPlay
                if fast and len(devices):
                    break
                time.sleep(0.05)
        except KeyboardInterrupt:  # pragma: no cover
            pass
        finally:
            zeroconf.close()

        return devices
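
A usage sketch for the class method above, assuming cls is the AirPlay client class mentioned in the docstring:

devices = AirPlay.find(timeout=5, fast=True)
if devices:                  # find() returns None when zeroconf is not installed
    for device in devices:
        print(device)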
Example #12
def zeroconf_without_service_info(mocker):
    from zeroconf import Zeroconf

    zeroconf_stub = mocker.stub(name='get_service_info')
    zeroconf_stub.return_value = None
    stub_object = Zeroconf()
    stub_object.get_service_info = zeroconf_stub
    return stub_object
Example #13
def zeroconf(mocker):
    from zeroconf import Zeroconf

    service_info = ServiceInfoFactory().create()
    zeroconf_stub = mocker.stub(name='get_service_info')
    zeroconf_stub.return_value = service_info
    stub_object = Zeroconf()
    stub_object.get_service_info = zeroconf_stub
    return stub_object
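
Both fixtures above stub Zeroconf.get_service_info via pytest-mock. A minimal sketch of tests consuming them; the service type and name are placeholders:

def test_returns_stubbed_service_info(zeroconf):
    info = zeroconf.get_service_info("_http._tcp.local.", "dummy._http._tcp.local.")
    assert info is not None


def test_handles_missing_service(zeroconf_without_service_info):
    info = zeroconf_without_service_info.get_service_info("_http._tcp.local.", "dummy._http._tcp.local.")
    assert info is None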
Example #14
    def get_others(self):
        """
        Wait for other services to make themselves visible.
        """
        zeroconf = Zeroconf()
        _ = ServiceBrowser(zeroconf, "_http._tcp.local.", handlers=[self.on_service_state_change])
        for _ in range(1, 10 + 1):
            time.sleep(1)
        zeroconf.close()
        return self.known_servers
Example #15
def main():
    """Main entry point."""
    main = MainApplication()
    zeroconf = Zeroconf()
    listener = MyListener(main.get_control())
    browser = ServiceBrowser(zeroconf, "_http._tcp.local.", listener)
    try:
        main.mainloop()
    finally:
        zeroconf.close()
Example #16
def test_close_waits_for_threads():
    class Dummy(object):
        def add_service(self, zeroconf_obj, service_type, name):
            pass

        def remove_service(self, zeroconf_obj, service_type, name):
            pass

    z = Zeroconf()
    z.add_service_listener('_privet._tcp.local.', listener=Dummy())
    z.close()
    assert not z.browsers[0].is_alive()
Example #17
    def run(self):
        #Log.debug('get_service_info')
        zeroconf = Zeroconf()
        listener = ServiceListener()
        service_type = "_compass_discovery._tcp.local."

        browser = ServiceBrowser(zeroconf, service_type, listener)
        #Log.debug('enter browser')
        try:
            while True:
                time.sleep(1)  # keep the browser alive without busy-waiting
        finally:
            zeroconf.close()
Example #18
def serviceStatus():

    zeroconf = Zeroconf()

    try:
        info = zeroconf.get_service_info("_http._tcp.local.", "Takiyaki._http._tcp.local.")

        # If service registered #
        if info:
            return (socket.inet_ntoa(info.address), info.port)
        # No named service registered #
        print("Service doesn't exist")
    finally:
        zeroconf.close()
Example #19
  def __init__(self, logger, if_addr=None):
    """Initialization requires a logger.

    Args:
      logger: initialized logger object.
      if_addr: string, interface address for Zeroconf, None means all interfaces.
    """
    # self.logger = _log.GetLogger('LogoCert')
    self.logger = logger
    if if_addr:
      self.zeroconf = Zeroconf([if_addr])
    else:
      self.zeroconf = Zeroconf(InterfaceChoice.All)
    self.listener = MDnsService(logger)
Example #20
class ZeroconfService(AbstractZeroconfService):
    """
    :class:`ZeroconfService` uses `python zeroconf`_

    .. _python zeroconf: https://pypi.org/project/zeroconf/

    Install::

    .. code-block:: bash

        pip install zeroconf
    """

    def __init__(self, name, port):
        super(ZeroconfService, self).__init__(name, port)

        self._zeroconf = None
        self._infos = []

    @classmethod
    def has_support(cls):
        return support

    def start(self):
        self._zeroconf = Zeroconf()
        for index, ip in enumerate(self.ips):
            info = self._generate_service_info(index, ip)
            self._infos.append(info)

            self._zeroconf.register_service(info)
            self._log('Zeroconf {} - Registered service: name={}, regtype={}, domain={}', self.__class__.__name__,
                      self.name, self.type, 'local.')
            self._log('         Network: {}', ip)

    def _generate_service_info(self, index, ip):
        name = '{}-{}.{}.local.'.format(self.name.lower(), index, self.type)

        return ServiceInfo(
            self.type + '.local.',
            name,
            socket.inet_aton(ip),
            self.port,
            0,
            0,
            {}
        )

    def close(self):
        for info in self._infos:
            self._zeroconf.unregister_service(info)
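
A minimal usage sketch, assuming AbstractZeroconfService supplies self.name, self.port, self.ips and self.type (e.g. '_http._tcp'); run_application() is a hypothetical stand-in for the caller's main loop:

service = ZeroconfService('MyApp', 8080)
if ZeroconfService.has_support():
    service.start()        # registers one ServiceInfo per local IP
    try:
        run_application()  # hypothetical main loop
    finally:
        service.close()    # unregisters every announced ServiceInfo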
Example #21
	def from_dacp_id(cls, dacp_id, token):
		zeroconf = Zeroconf()
		try:
			listener = ServiceListener(airplay_prefix.format(dacp_id=dacp_id), zeroconf)
			browser = ServiceBrowser(zeroconf, airplay_zeroconf_service, listener)
			wait_for_it = ResultWaiter(listener, browser)
			wait_for_it.start()
			wait_for_it.join()
			del wait_for_it
		finally:
			zeroconf.close()
		assert(listener.info)  # fails if service was not found.
		host = "http://" +  binary_ip_to_str(listener.info.address)
		port = listener.info.port
		return AirplayRemote(token, host, port)
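
A hypothetical call to the factory method above; the DACP id and pairing token are placeholders, and airplay_prefix / airplay_zeroconf_service are assumed to be defined alongside the class:

token = "0000000000000000"  # pairing token obtained elsewhere (placeholder)
remote = AirplayRemote.from_dacp_id("1122334455667788", token)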
Example #22
    def __init__(self, hostname, args=None):
        if zeroconf_tag:
            zeroconf_bind_address = args.bind_address
            try:
                self.zeroconf = Zeroconf()
            except socket.error as e:
                logger.error("Cannot start zeroconf: {0}".format(e))

            # XXX *BSDs: Segmentation fault (core dumped)
            # -- https://bitbucket.org/al45tair/netifaces/issues/15
            if not BSD:
                try:
                    # -B @ overwrite the dynamic IPv4 choice
                    if zeroconf_bind_address == '0.0.0.0':
                        zeroconf_bind_address = self.find_active_ip_address()
                except KeyError:
                    # Issue #528 (no network interface available)
                    pass

            print("Announce the Glances server on the LAN (using {0} IP address)".format(zeroconf_bind_address))
            self.info = ServiceInfo(
                zeroconf_type, '{0}:{1}.{2}'.format(hostname, args.port, zeroconf_type),
                address=socket.inet_aton(zeroconf_bind_address), port=args.port,
                weight=0, priority=0, properties={}, server=hostname)
            self.zeroconf.register_service(self.info)
        else:
            logger.error("Cannot announce Glances server on the network: zeroconf library not found.")
Example #23
    def announce(self, discovery_name, port):
        self.zeroconf = Zeroconf()
        self.zconfigs = []
        for i in netifaces.interfaces():
            if i.startswith("lo"):
                # remove loopback from announce
                continue
            if i.startswith("veth"):
                # remove docker interface from announce
                continue

            addrs = netifaces.ifaddresses(i)
            if addrs.keys() == [17]:
                # only a link-layer (AF_PACKET == 17) address, nothing to announce
                continue
            print(addrs)
            for a in addrs[netifaces.AF_INET]:
                print(a)
                info_desc = {'path': '/progs_grp/', 'name': discovery_name}
                config = ServiceInfo("_aether._tcp.local.",
                               "%s_%s_%s_ubiquitous._aether._tcp.local." % (socket.gethostname(),i, port),
                               socket.inet_aton(a['addr']), port, 0, 0,
                               info_desc)# , "aether-autodisc-0.local.")

                self.zeroconf.register_service(config)
                self.zconfigs.append(config)
Example #24
class ServiceDiscoveryClient:

    def __init__(self, service_timeout=None):
        self.zeroconf = Zeroconf()
        self.service_args = [SRV_TYPE, srv_fqname()]
        if service_timeout:
            self.service_args.append(service_timeout)

    @cached_property_with_ttl(ttl=1.0)
    def info(self):
        service_info = self.zeroconf.get_service_info(*self.service_args)
        if not service_info:
            raise ZeroconfServiceNotFound(
                "Pilotwire Controller Zeroconf service not found"
            )
        return service_info

    @property
    def address(self):
        # pylint: disable=no-member
        return socket.inet_ntoa(self.info.address)

    @property
    def port(self):
        # pylint: disable=no-member
        return str(self.info.port)
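
A hedged usage sketch of the client above, assuming SRV_TYPE and srv_fqname() describe the pilotwire controller's advertised service (the timeout is in milliseconds, as get_service_info expects):

client = ServiceDiscoveryClient(service_timeout=3000)
try:
    print("Pilotwire controller at {}:{}".format(client.address, client.port))
except ZeroconfServiceNotFound as err:
    print(err)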
Example #25
    def __init__(self, ip=None):
        """
        :ip: if string `ip` given, register on given IP
             (if None: default route's IP).
        """
        self.zeroconf = Zeroconf()
        self.info = build_service_info(ip=ip or main_ip())
Example #26
    def __init__(self, hostname, args=None):
        if zeroconf_tag:
            zeroconf_bind_address = args.bind_address
            try:
                self.zeroconf = Zeroconf()
            except socket.error as e:
                logger.error("Cannot start zeroconf: {0}".format(e))

            if netifaces_tag:
                # -B @ overwrite the dynamic IPv4 choice
                if zeroconf_bind_address == '0.0.0.0':
                    zeroconf_bind_address = self.find_active_ip_address()
            else:
                logger.error("Couldn't find the active IP address: netifaces library not found.")

            logger.info("Announce the Glances server on the LAN (using {0} IP address)".format(zeroconf_bind_address))
            print("Announce the Glances server on the LAN (using {0} IP address)".format(zeroconf_bind_address))

            self.info = ServiceInfo(
                zeroconf_type, '{0}:{1}.{2}'.format(hostname, args.port, zeroconf_type),
                address=socket.inet_aton(zeroconf_bind_address), port=args.port,
                weight=0, priority=0, properties={}, server=hostname)
            self.zeroconf.register_service(self.info)
        else:
            logger.error("Cannot announce Glances server on the network: zeroconf library not found.")
Example #27
class ServiceListener(object):
    def __init__(self):
        # Is this necessary, when zeroconf is being passed into add_service?
        self.zconf = Zeroconf()
        self.services = []

    def remove_service(self, zeroconf, service_type, name):
        print("Not implemented.")
        return True

    def add_service(self, zeroconf, service_type, name):
        this_service = {}
        this_service['name'] = name
        this_service['service_type'] = service_type
        this_service['address'] = ''
        this_service['server'] = ''
        info = self.zconf.get_service_info(service_type, name)
        if info:
            this_service['address'] = "%s:%d" % (socket.inet_ntoa(info.address), info.port)
            # Construct the link by finding service type in service_types object; or use http if not found in list.
            this_service['link'] = "%s%s" % (proto_dict.get(service_type, proto_dict['_http._tcp.local.']), this_service['address'])
            this_service['server'] = info.server
            # prop = info.getProperties()
            # if prop:
            #     print "  Properties are"
            #     for key, value in prop.items():
            #         print "    %s: %s" % (key, value)
        self.services.append(this_service)
Example #28
	def __init__(self, listen_pin):
		GPIO.setwarnings(False)
		GPIO.setmode(GPIO.BOARD)
		GPIO.setup(listen_pin, GPIO.IN)
		self._pin = listen_pin
		
		self._log_fmt = logging.Formatter(fmt="%(asctime)s [%(levelname)-8s] %(message)s", datefmt="%b %d %H:%M:%S")
		self._log = logging.getLogger()
		self._log.setLevel(logging.INFO)
		
		streamHandler = logging.StreamHandler()
		streamHandler.setLevel(logging.DEBUG)
		streamHandler.setFormatter(self._log_fmt)
		self._log.addHandler(streamHandler)
		
		self._display = DisplayRunner.DisplayRunner()
		self._display.set_mode(0)
		self._display.start()
			
		self._new_connections = []
		self._accept_responses = threading.Event()
		self._responses = []
		
		self._conn = pika.BlockingConnection(pika.ConnectionParameters(host="localhost", virtual_host=self._vhost))	
		self._chan = self._conn.channel()
		
		queueResult = self._chan.queue_declare(auto_delete=True)
		if queueResult is None:
			self._log.error("Could not create connect queue")
			raise RuntimeError("Error configuring RabbitMQ")
		else:
			self._log.info("Created queue \'%s\'" % (queueResult.method.queue,))
			self._log.info("Using exchange \'%s/%s\'" % (self._vhost, self._exchange,))
			self._chan.exchange_declare(exchange=self._exchange, type="topic", auto_delete=True)
			self._chan.queue_bind(exchange=self._exchange, queue=queueResult.method.queue, routing_key=self._routing_key)
			
		self._queue_name = queueResult.method.queue
		
		server_ip, ifaceName = self._get_service_ip()
		if server_ip is None:
			self._log.error("Could not determine server IP")
			raise RuntimeError("Error finding server IP")
		else:
			self._log.info("Broadcasting service %s with IP %s (%s)" % (self._get_service_name(), server_ip, ifaceName))
		
		# Configure zeroconf to broadcast this service
		self._zeroconf = Zeroconf()
		self._zeroconf_info = ServiceInfo("_http._tcp.local.",
			self._get_service_name(),
			socket.inet_aton(server_ip),
			5672, 0, 0,
			{"exchange_name": self._exchange, "routing_key": self._routing_key, "virtual_host": self._vhost},
			None)
		
		try:
			self._zeroconf.register_service(self._zeroconf_info)
		except NonUniqueNameException:  # module-level exception from the zeroconf package
			self._log.warn("Service with name \'%s\' already broadcasting on this network!" % (self._get_service_name(),))
Example #29
    def advertise(self):
        postfix = self.config['global']['service_prefix']
        self.port = int(self.config['global']['port'])
        #print(self.config['device']['hostname']+postfix)
        info = ServiceInfo(postfix, self.config['device']['hostname']+"."+postfix,
                       socket.inet_aton(self.ip), self.port, 0, 0,
                       {'info': self.config['device']['description']}, "hazc.local.")

        self.bindConnection()

        zeroconf = Zeroconf()
        zeroconf.register_service(info)


        try:
            while True:
#                 try:
                print("Ready")
                self.conn, self.addr = self.webcontrol.accept()
                self.listen()
                self.conn.close()
        except KeyboardInterrupt:
            pass
        finally:
            print()
            print("Unregistering...")
            zeroconf.unregister_service(info)
            zeroconf.close()

        try:
            print("Shutting down socket")
            self.webcontrol.shutdown(socket.SHUT_RDWR)
        except Exception as e:
            print(e)
Example #30
    def __init__(self, address, port):
        self.address = address
        self.port = port
        self.properties = {
            'nickname': u'Printer Nickname',
            'service': u'Tryton POS'
        }
        self.service = Zeroconf()
        self._service_info = None
Example #31
class Agent:
    def __init__(self, config):
        logging.info("Initialising agent")
        self._config = config

        self._uuid = config.uuid

        self._context_lock = threading.Lock()
        self._check_status_thread = threading.Thread()

        self._context = {
            'running': False,
            'info': types.Info(status=types.Status.SCANNING),
            'context_lock': self._context_lock,
            'config': self._config,
            'version': None,
            'state_meta': None,
            'fg_process': None
        }

        self._zeroconf_enabled = config.zeroconf_enabled

        if self._zeroconf_enabled:
            self._m_zeroconf = Zeroconf()
            self._zeroconf_desc = {
                'path': '/graphiql/',
                'endpoint': '/graphql/',
                'uuid': config.uuid,
                'ipaddress': config.my_ip
            }
            self._zeroconf_info = ServiceInfo(
                constants.AGENT_SERVICE_TYPE,
                config.agent_service_name,
                socket.inet_aton(config.my_ip), constants.AGENT_PORT, 0, 0,
                self._zeroconf_desc, f"{config.my_hostname}.local."
            )

    def run(self):
        logging.info("Starting agent")
        with self._context_lock:
            self._context['running'] = True

        flask_app = self._create_app()

        atexit.register(self._shutdown)

        if self._zeroconf_enabled:
            logging.info("Registration of a service, press Ctrl-C to exit...")
            self._m_zeroconf.register_service(self._zeroconf_info)

        self._check_status_thread = threading.Timer(10, self._check_status)
        self._check_status_thread.start()
        flask_app.run(host='0.0.0.0')

    def _check_status(self):
        # state machine that actually manages things
        with self._context_lock:
            if self._context['running']:
                current_status = self._context['info'].status
                current_errors = self._context['info'].errors
                current_aircraft = self._context['info'].aircraft
                current_os = self._context['info'].os
                current_os_string = self._context['info'].os_string
                current_fg_process = self._context['fg_process']

                next_os = current_os
                next_os_string = current_os_string
                next_status = current_status
                next_aircraft = current_aircraft
                next_errors = current_errors
                next_fg_process = current_fg_process

                config = self._context['config']

                if current_status == types.Status.SCANNING:
                    next_os, next_os_string = util.discover_os()
                    next_errors, memo = self._check_environment(
                        next_os, self._config)
                    self._context = {**self._context, **memo}

                    if len(next_errors) == 0:
                        next_status = types.Status.READY
                    else:
                        next_status = types.Status.ERROR

                elif current_status == types.Status.INSTALLING_AIRCRAFT:
                    #       the giant state machine will:
                    #           - check if the aircraft has already been cloned
                    #               - do an svn up on it if it already exists
                    #               - or a fresh check out
                    #               - progress state to READY or ERROR when done
                    #
                    svn_name = self._context['state_meta']
                    logging.info(
                        f"Installing or Updating aircraft '{svn_name}'")
                    expected_aircraft_path = Path(
                        config.aircraft_path,
                        svn_name
                    )
                    logging.info(
                        f"Checking if {expected_aircraft_path} exists")

                    if expected_aircraft_path.exists():
                        logging.info("Updating existing aircraft")
                        lc = svn.local.LocalClient(f"{expected_aircraft_path}")
                        try:
                            lc.update()
                            next_status = types.Status.READY
                            logging.info("Done updating aircraft")
                        except SvnException:
                            next_status = types.Status.ERROR
                            next_errors = [types.Error(
                                code=types.ErrorCode.AIRCRAFT_NOT_IN_VERSION_CONTROL,
                                description=f"Aircraft {svn_name} is not under version control. Delete the folder {expected_aircraft_path} and try reinstalling."
                            )]
                    else:
                        upstream_repo_url = f"{self._context['aircraft_svn_base_url']}/{svn_name}"
                        logging.info(f"Cloning from {upstream_repo_url}")
                        rc = svn.remote.RemoteClient(upstream_repo_url)
                        try:
                            rc.checkout(f"{expected_aircraft_path}")
                        except svn.exception.SvnException as e:
                            logging.error(
                                f"Unable to clone aircraft '{svn_name}': {e}")
                            next_status = types.Status.ERROR
                            next_errors = [
                                types.Error(
                                    code=types.ErrorCode.AIRCRAFT_INSTALL_FAILED,
                                    description=f"{e}"
                                )
                            ]
                        except FileNotFoundError:
                            next_status = types.Status.ERROR
                            next_errors = [types.Error(
                                code=types.ErrorCode.SVN_NOT_INSTALLED,
                                description=f"Aircraft {svn_name} could not be installed. Check that you have svn installed."
                            )]
                        else:
                            logging.info("Done cloning aircraft")
                            next_status = types.Status.READY

                elif current_status == types.Status.FGFS_START_REQUESTED:
                    # assemble arguments
                    env = config.assemble_fgfs_env_vars()
                    env_str = [f'{k}={v}' for k, v in env[0].items()]
                    env_str = ' '.join(env_str)
                    args = [f"{config.fgfs_path}"] + \
                        self._context['state_meta']
                    logging.info(f"***** About to trigger FGFS *****")
                    logging.info("")
                    logging.info(f"     env: {env_str}")
                    logging.info(f"     cmd: {' '.join(args)}")
                    logging.info("")
                    logging.info(f"*********************************")
                    next_fg_process = subprocess.Popen(
                        args,
                        text=True,
                        env=env[1]
                    )
                    self._context['state_meta'] = datetime.datetime.now()
                    next_status = types.Status.FGFS_STARTING

                elif current_status == types.Status.FGFS_STARTING:
                    # TODO: implement actual check whether FGFS is up
                    if (datetime.datetime.now() - self._context['state_meta']).seconds >= config.fgfs_startup_time:
                        next_status = types.Status.FGFS_RUNNING

                elif current_status == types.Status.FGFS_RUNNING:
                    # check that fgfs is still up, set error if it crashes
                    logging.debug(f"current_fg_process = {current_fg_process}")
                    logging.debug(
                        f"current_fg_process.poll() = {current_fg_process.poll()}")

                    if current_fg_process.poll() is not None:
                        rc = current_fg_process.returncode
                        if rc == 0:
                            next_status = types.Status.READY
                        else:
                            msg = f"Abnormal FlightGear termination with returncode {rc}!"
                            logging.error(msg)
                            next_errors = [
                                types.Error(
                                    code=types.ErrorCode.FGFS_ABNORMAL_EXIT,
                                    description=msg
                                )
                            ]
                            next_status = types.Status.ERROR

                        next_fg_process = None

                elif current_status == types.Status.FGFS_STOP_REQUESTED:
                    self._context['fg_process'].terminate()
                    self._context['fg_process'] = None
                    next_status = types.Status.READY
                elif current_status == types.Status.ERROR:
                    pass
                elif current_status == types.Status.READY:
                    pass

                self._context['info'] = types.Info(
                    status=next_status,
                    os=next_os,
                    os_string=next_os_string,
                    timestamp=int(time.time()),
                    errors=next_errors,
                    aircraft=next_aircraft,
                    uuid=self._uuid
                )
                self._context['fg_process'] = next_fg_process

                self._check_status_thread = threading.Timer(
                    10, self._check_status)
                self._check_status_thread.start()

    def _assemble_fg_args(self):
        return [
            self._context['config'].fgfs_path
        ]

    @staticmethod
    def windows_find_fgfs():
        install_loc = util.locate_fgfs_in_windows_registry()

        if install_loc:
            return f"{install_loc}bin\\fgfs.exe"

    @staticmethod
    def linux_find_fgfs():
        res = None
        memo = subprocess.run(
            ["which", "fgfs"],
            capture_output=True,
            text=True
        )
        if memo.returncode == 0:
            res = memo.stdout.strip()

        return res

    @staticmethod
    def darwin_find_fgfs():
        return Agent.linux_find_fgfs()

    @staticmethod
    def windows_find_fghome():
        return Path(
            os.environ['USERPROFILE'],
            'Documents\\FlightGear'
        )

    @staticmethod
    def linux_find_fghome():
        return Path(Path.home(), ".fgfs")

    @staticmethod
    def darwin_find_fghome():
        return Path(Path.home(), "Library/Application Support/FlightGear")

    def _check_environment(self, os_, config):
        error_list = []
        memo = {}
        # check fgfs location - executable
        fgfs_error = self._check_path_set_and_exists('fgfs')
        os_name_lower = os_.lower_name

        if fgfs_error is not None:
            fgfs_find_result = getattr(self, f"{os_name_lower}_find_fgfs")()

            if fgfs_find_result:
                logging.info(f"Found fgfs at {fgfs_find_result}!")
                config.fgfs_path = fgfs_find_result
                config.save()
                fgfs_error = None

        error_list += filter(None, [fgfs_error])

        fgroot_error = self._check_path_set_and_exists('fgroot')

        # http://www.flightgear.org/Docs/getstart/getstartch3.html
        # see http://wiki.flightgear.org/$FG_ROOT
        if fgroot_error is not None and fgfs_error is None:
            proposed_path = getattr(util, f"{os_name_lower}_find_fgroot")()

            if proposed_path is not None:
                path_obj = Path(proposed_path)
                if path_obj.exists():
                    logging.info(f"Found fgroot at {path_obj}!")
                    config.fgroot_path = path_obj
                    config.save()
                    fgroot_error = None

        error_list += filter(None, [fgroot_error])

        protocol_file_error = None

        if fgroot_error is None:
            # check for the custom protocol file
            expected_protocol_file = Path(
                config.fgroot_path,
                "Protocol",
                "fgo.xml"
            )

            if not expected_protocol_file.exists():
                protocol_file_error = agent_errors.ProtocolFileMissingError(
                    expected_protocol_file)
            elif not util.check_protocol_file(expected_protocol_file):
                protocol_file_error = agent_errors.ProtocolFileHashMismatch(
                    expected_protocol_file
                )

        error_list += filter(None, [protocol_file_error])

        fghome_error = self._check_path_set_and_exists('fghome')

        if fghome_error is not None:
            # http://wiki.flightgear.org/$FG_HOME
            path_obj = getattr(self, f"{os_name_lower}_find_fghome")()

            if path_obj.exists():
                logging.info(f"Found fghome at {path_obj}!")
                config.fghome_path = path_obj
                config.save()
                fghome_error = None

        error_list += filter(None, [fghome_error])

        # check if aircraft path set - directory
        aircraft_path_error = self._check_path_set_and_exists('aircraft')
        # if we have a fghome_error, see if there is an aircraft folder in it

        if aircraft_path_error is not None and fghome_error is None:
            proposed_path = Path(config.fghome_path, 'Aircraft')
            if proposed_path.exists():
                logging.info(f"Found aircraft at {proposed_path}!")
                config.aircraft_path = proposed_path
                config.save()
                aircraft_path_error = None

        error_list += filter(None, [aircraft_path_error])
        error_list += filter(None,
                             [self._check_path_set_and_exists('terrasync', allow_none=True)])

        if len(error_list) > 0:
            return error_list, memo

        fgfs_path = config.fgfs_path

        try:
            logging.info("Starting FGFS to find version")
            version_result = subprocess.run(
                [f"{fgfs_path}", '--version'],
                capture_output=True,
                text=True,
                timeout=3,
                env=config.assemble_fgfs_env_vars()[1]
            )
        except (OSError, subprocess.TimeoutExpired) as e:
            error_list.append(
                types.Error(
                    code=types.ErrorCode.FG_VERSION_CHECK_FAILED,
                    description=textwrap.dedent(f"""\
                        {e}

                        Failed to retrieve version. Check paths.
                        FGFS_PATH ({config.fgfs_path}) should point to the FGFS executable
                        FG_ROOT ({config.fgroot_path}) should point to read-only FlightGear files.
                        FG_HOME ({config.fghome_path}) should point to read/write user-specific FlightGear data
                    """)
                )
            )
        else:
            logging.debug(f"Version result: {version_result}")
            match = re.search(r'^.*FlightGear version: (.*)\n',
                              version_result.stdout)
            version_frags = [int(x) for x in match[1].split('.')]
            version_obj = types.Version(
                major=version_frags[0], minor=version_frags[1], patch=version_frags[2])
            memo['version'] = version_obj
            memo[
                'aircraft_svn_base_url'] = f"https://svn.code.sf.net/p/flightgear/fgaddon/branches/release-{version_obj.major}.{version_obj.minor}/Aircraft"

        return error_list, memo

    def _check_path_set_and_exists(self, selector, allow_none=False):
        key = f"{selector}_path"
        value = getattr(self._config, key, None)
        logging.debug(
            f"_check_path_set_and_exists key={key} value={value} allow_none={allow_none}")

        if allow_none and value is None:
            return None

        if value is None:
            return types.Error(code=types.ErrorCode[f'{selector.upper()}_PATH_NOT_SET'])

        if not Path(value).exists():
            return types.Error(
                code=types.ErrorCode[f'{selector.upper()}_PATH_NOT_EXIST'],
                description=f"Could not locate path '{value}'"
            )

        return None

    def _shutdown(self):
        with self._context_lock:
            self._context['running'] = False

        if self._check_status_thread.is_alive():
            logging.info("Waiting for the status checker to quit...")
            self._check_status_thread.join(5)

        if self._zeroconf_enabled:
            logging.info("Unregistering service")
            self._m_zeroconf.unregister_service(self._zeroconf_info)
            self._m_zeroconf.close()

    def _create_app(self):
        app = Flask(__name__)

        app.add_url_rule(
            '/graphql',
            view_func=GraphQLView.as_view(
                'graphql',
                schema=schema.Schema,
                graphiql=True,
                get_context=lambda: self._context
            )
        )

        return app
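
A minimal sketch of driving the agent above; load_config() is a hypothetical helper, and the config object is assumed to expose the fields the constructor reads (uuid, zeroconf_enabled, my_ip, my_hostname, agent_service_name, ...):

config = load_config()  # hypothetical helper returning the agent configuration
agent = Agent(config)
agent.run()             # registers the zeroconf service (if enabled) and starts the Flask GraphQL app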
Example #32
class NetworkPrinterOutputDevicePlugin(QObject, OutputDevicePlugin):
    def __init__(self):
        super().__init__()
        self._zero_conf = None
        self._browser = None
        self._printers = {}
        self._cluster_printers_seen = {}  # remember a cluster printer once seen, so we don't 'downgrade' it from Connect to a legacy printer

        self._api_version = "1"
        self._api_prefix = "/api/v" + self._api_version + "/"
        self._cluster_api_version = "1"
        self._cluster_api_prefix = "/cluster-api/v" + self._cluster_api_version + "/"

        self._network_manager = QNetworkAccessManager()
        self._network_manager.finished.connect(self._onNetworkRequestFinished)

        # List of old printer names. This is used to ensure that a refresh of zeroconf does not needlessly force
        # authentication requests.
        self._old_printers = []

        # Because the model needs to be created in the same thread as the QMLEngine, we use a signal.
        self.addPrinterSignal.connect(self.addPrinter)
        self.removePrinterSignal.connect(self.removePrinter)
        Application.getInstance().globalContainerStackChanged.connect(
            self.reCheckConnections)

        # Get list of manual printers from preferences
        self._preferences = Preferences.getInstance()
        self._preferences.addPreference(
            "um3networkprinting/manual_instances",
            "")  # A comma-separated list of IP addresses or hostnames
        self._manual_instances = self._preferences.getValue(
            "um3networkprinting/manual_instances").split(",")

        self._network_requests_buffer = {}  # store API responses until data is complete

    addPrinterSignal = Signal()
    removePrinterSignal = Signal()
    printerListChanged = Signal()

    ##  Start looking for devices on network.
    def start(self):
        self.startDiscovery()

    def startDiscovery(self):
        self.stop()
        if self._browser:
            self._browser.cancel()
            self._browser = None
            self._old_printers = [
                printer_name for printer_name in self._printers
            ]
            self._printers = {}
            self.printerListChanged.emit()
        # After network switching, one must make a new instance of Zeroconf.
        # On Windows, the instance creation is very fast (unnoticeable). Other platforms?
        self._zero_conf = Zeroconf()
        self._browser = ServiceBrowser(self._zero_conf,
                                       u'_ultimaker._tcp.local.',
                                       [self._onServiceChanged])

        # Look for manual instances from preference
        for address in self._manual_instances:
            if address:
                self.addManualPrinter(address)

    def addManualPrinter(self, address):
        if address not in self._manual_instances:
            self._manual_instances.append(address)
            self._preferences.setValue("um3networkprinting/manual_instances",
                                       ",".join(self._manual_instances))

        instance_name = "manual:%s" % address
        properties = {
            b"name": address.encode("utf-8"),
            b"address": address.encode("utf-8"),
            b"manual": b"true",
            b"incomplete": b"true"
        }

        if instance_name not in self._printers:
            # Add a preliminary printer instance
            self.addPrinter(instance_name, address, properties)

        self.checkManualPrinter(address)
        self.checkClusterPrinter(address)

    def removeManualPrinter(self, key, address=None):
        if key in self._printers:
            if not address:
                address = self._printers[key].ipAddress
            self.removePrinter(key)

        if address in self._manual_instances:
            self._manual_instances.remove(address)
            self._preferences.setValue("um3networkprinting/manual_instances",
                                       ",".join(self._manual_instances))

    def checkManualPrinter(self, address):
        # Check if a printer exists at this address
        # If a printer responds, it will replace the preliminary printer created above
        # origin=manual is for tracking back the origin of the call
        url = QUrl("http://" + address + self._api_prefix +
                   "system?origin=manual_name")
        name_request = QNetworkRequest(url)
        self._network_manager.get(name_request)

    def checkClusterPrinter(self, address):
        cluster_url = QUrl("http://" + address + self._cluster_api_prefix +
                           "printers/?origin=check_cluster")
        cluster_request = QNetworkRequest(cluster_url)
        self._network_manager.get(cluster_request)

    ##  Handler for all requests that have finished.
    def _onNetworkRequestFinished(self, reply):
        reply_url = reply.url().toString()
        status_code = reply.attribute(QNetworkRequest.HttpStatusCodeAttribute)

        if reply.operation() == QNetworkAccessManager.GetOperation:
            address = reply.url().host()
            if "origin=manual_name" in reply_url:  # Name returned from printer.
                if status_code == 200:

                    try:
                        system_info = json.loads(
                            bytes(reply.readAll()).decode("utf-8"))
                    except json.JSONDecodeError:
                        Logger.log("e", "Printer returned invalid JSON.")
                        return
                    except UnicodeDecodeError:
                        Logger.log("e", "Printer returned incorrect UTF-8.")
                        return

                    if address not in self._network_requests_buffer:
                        self._network_requests_buffer[address] = {}
                    self._network_requests_buffer[address][
                        "system"] = system_info
            elif "origin=check_cluster" in reply_url:
                if address not in self._network_requests_buffer:
                    self._network_requests_buffer[address] = {}
                if status_code == 200:
                    # We know it's a cluster printer
                    Logger.log("d", "Cluster printer detected: [%s]",
                               reply.url())
                    self._network_requests_buffer[address]["cluster"] = True
                else:
                    Logger.log("d",
                               "This url is not from a cluster printer: [%s]",
                               reply.url())
                    self._network_requests_buffer[address]["cluster"] = False

            # Both the system call and cluster call are finished
            if (address in self._network_requests_buffer
                    and "system" in self._network_requests_buffer[address]
                    and "cluster" in self._network_requests_buffer[address]):

                instance_name = "manual:%s" % address
                system_info = self._network_requests_buffer[address]["system"]
                is_cluster = self._network_requests_buffer[address]["cluster"]
                machine = "unknown"
                if "variant" in system_info:
                    variant = system_info["variant"]
                    if variant == "Ultimaker 3":
                        machine = "9066"
                    elif variant == "Ultimaker 3 Extended":
                        machine = "9511"

                properties = {
                    b"name": system_info["name"].encode("utf-8"),
                    b"address": address.encode("utf-8"),
                    b"firmware_version":
                    system_info["firmware"].encode("utf-8"),
                    b"manual": b"true",
                    b"machine": machine.encode("utf-8")
                }
                if instance_name in self._printers:
                    # Only replace the printer if it is still in the list of (manual) printers
                    self.removePrinter(instance_name)
                    self.addPrinter(instance_name,
                                    address,
                                    properties,
                                    force_cluster=is_cluster)

                del self._network_requests_buffer[address]

    ##  Stop looking for devices on network.
    def stop(self):
        if self._zero_conf is not None:
            Logger.log("d", "zeroconf close...")
            self._zero_conf.close()

    def getPrinters(self):
        return self._printers

    def reCheckConnections(self):
        active_machine = Application.getInstance().getGlobalContainerStack()
        if not active_machine:
            return

        for key in self._printers:
            if key == active_machine.getMetaDataEntry("um_network_key"):
                if not self._printers[key].isConnected():
                    Logger.log("d", "Connecting [%s]..." % key)
                    self._printers[key].connect()
                    self._printers[key].connectionStateChanged.connect(
                        self._onPrinterConnectionStateChanged)
            else:
                if self._printers[key].isConnected():
                    Logger.log("d", "Closing connection [%s]..." % key)
                    self._printers[key].close()
                    self._printers[key].connectionStateChanged.disconnect(
                        self._onPrinterConnectionStateChanged)

    ##  Because the model needs to be created in the same thread as the QMLEngine, we use a signal.
    def addPrinter(self, name, address, properties, force_cluster=False):
        cluster_size = int(properties.get(b"cluster_size", -1))
        was_cluster_before = name in self._cluster_printers_seen
        if was_cluster_before:
            Logger.log(
                "d",
                "Printer [%s] had Cura Connect before, so assume it's still equipped with Cura Connect.",
                name)
        if force_cluster or cluster_size >= 0 or was_cluster_before:
            printer = NetworkClusterPrinterOutputDevice.NetworkClusterPrinterOutputDevice(
                name, address, properties, self._api_prefix, self._plugin_path)
        else:
            printer = NetworkPrinterOutputDevice.NetworkPrinterOutputDevice(
                name, address, properties, self._api_prefix)
        self._printers[printer.getKey()] = printer
        self._cluster_printers_seen[printer.getKey()] = name  # keep cluster printers that may be temporarily unreachable or rebooting
        global_container_stack = Application.getInstance(
        ).getGlobalContainerStack()
        if global_container_stack and printer.getKey(
        ) == global_container_stack.getMetaDataEntry("um_network_key"):
            if printer.getKey(
            ) not in self._old_printers:  # Was the printer already connected, but a re-scan forced?
                Logger.log("d",
                           "addPrinter, connecting [%s]..." % printer.getKey())
                self._printers[printer.getKey()].connect()
                printer.connectionStateChanged.connect(
                    self._onPrinterConnectionStateChanged)
        self.printerListChanged.emit()

    def removePrinter(self, name):
        printer = self._printers.pop(name, None)
        if printer:
            if printer.isConnected():
                printer.disconnect()
                printer.connectionStateChanged.disconnect(
                    self._onPrinterConnectionStateChanged)
                Logger.log("d", "removePrinter, disconnecting [%s]..." % name)
        self.printerListChanged.emit()

    ##  Handler for when the connection state of one of the detected printers changes
    def _onPrinterConnectionStateChanged(self, key):
        if key not in self._printers:
            return
        if self._printers[key].isConnected():
            self.getOutputDeviceManager().addOutputDevice(self._printers[key])
        else:
            self.getOutputDeviceManager().removeOutputDevice(key)

    ##  Handler for zeroConf detection
    def _onServiceChanged(self, zeroconf, service_type, name, state_change):
        if state_change == ServiceStateChange.Added:
            Logger.log("d", "Bonjour service added: %s" % name)

            # First try getting info from zeroconf cache
            info = ServiceInfo(service_type, name, properties={})
            for record in zeroconf.cache.entries_with_name(name.lower()):
                info.update_record(zeroconf, time.time(), record)

            for record in zeroconf.cache.entries_with_name(info.server):
                info.update_record(zeroconf, time.time(), record)
                if info.address:
                    break

            # Request more data if info is not complete
            if not info.address:
                Logger.log("d", "Trying to get address of %s", name)
                info = zeroconf.get_service_info(service_type, name)

            if info:
                type_of_device = info.properties.get(b"type", None)
                if type_of_device:
                    if type_of_device == b"printer":
                        address = '.'.join(map(lambda n: str(n), info.address))
                        self.addPrinterSignal.emit(str(name), address,
                                                   info.properties)
                    else:
                        Logger.log(
                            "w",
                            "The type of the found device is '%s', not 'printer'! Ignoring.."
                            % type_of_device)
            else:
                Logger.log("w", "Could not get information about %s" % name)

        elif state_change == ServiceStateChange.Removed:
            Logger.log("d", "Bonjour service removed: %s" % name)
            self.removePrinterSignal.emit(str(name))

    ##  For cluster below
    def _get_plugin_directory_name(self):
        current_file_absolute_path = os.path.realpath(__file__)
        directory_path = os.path.dirname(current_file_absolute_path)
        _, directory_name = os.path.split(directory_path)
        return directory_name

    @property
    def _plugin_path(self):
        return PluginRegistry.getInstance().getPluginPath(
            self._get_plugin_directory_name())

    @pyqtSlot()
    def openControlPanel(self):
        Logger.log("d", "Opening print jobs web UI...")
        selected_device = self.getOutputDeviceManager().getActiveDevice()
        if isinstance(
                selected_device, NetworkClusterPrinterOutputDevice.
                NetworkClusterPrinterOutputDevice):
            QDesktopServices.openUrl(QUrl(selected_device.getPrintJobsUrl()))
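
A note on the resolution step above: _onServiceChanged first tries the zeroconf cache and only falls back to get_service_info() when the address is missing. A minimal standalone sketch of that simpler fallback path, assuming a recent python-zeroconf (the service type _demo._tcp.local. and the 5-second browse window are illustrative):

import socket
import time

from zeroconf import ServiceBrowser, ServiceStateChange, Zeroconf


def on_change(zeroconf, service_type, name, state_change):
    # Mirror the handler above: only react to newly added services.
    if state_change is not ServiceStateChange.Added:
        return
    info = zeroconf.get_service_info(service_type, name)
    if info is None:
        return
    # info.addresses holds packed addresses; this assumes an IPv4 record.
    address = socket.inet_ntoa(info.addresses[0])
    print(name, address, info.port, info.properties)


if __name__ == '__main__':
    zc = Zeroconf()
    browser = ServiceBrowser(zc, '_demo._tcp.local.', handlers=[on_change])
    try:
        time.sleep(5)
    finally:
        zc.close()
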
Ejemplo n.º 33
0
class Backend(Thread, EventGenerator):
    """
    Parent class for backends.

    A backend is basically a thread that checks for new events on some channel
    (e.g. a network socket, a queue, some new entries on an API endpoint or an
    RSS feed, a voice command through an assistant, a new measurement from a
    sensor, etc.) and propagates event messages to the main application bus
    whenever a new event happens. You can then build whatever custom logic you
    want on top of such events.
    """

    _default_response_timeout = 5

    # Loop function, can be implemented by derived classes
    loop = None

    def __init__(self,
                 bus: Optional[Bus] = None,
                 poll_seconds: Optional[float] = None,
                 **kwargs):
        """
        :param bus: Reference to the bus object to be used in the backend
        :param poll_seconds: If the backend implements a ``loop`` method, this parameter expresses how often the
            loop should run in seconds.
        :param kwargs: Key-value configuration for the backend
        """

        self._thread_name = self.__class__.__name__
        EventGenerator.__init__(self)
        Thread.__init__(self, name=self._thread_name, daemon=True)

        # If no bus is specified, create an internal queue where
        # the received messages will be pushed
        self.bus = bus or Bus()
        self.poll_seconds = float(poll_seconds) if poll_seconds else None
        self.device_id = Config.get('device_id')
        self.thread_id = None
        self._should_stop = False
        self._stop_event = threading.Event()
        self._kwargs = kwargs
        self.logger = logging.getLogger(
            'platypush:backend:' + get_backend_name_by_class(self.__class__))
        self.zeroconf = None
        self.zeroconf_info = None

        # Internal-only, we set the request context on a backend if that
        # backend is intended to react for a response to a specific request
        self._request_context = kwargs['_req_ctx'] if '_req_ctx' in kwargs \
            else None

        if 'logging' in kwargs:
            self.logger.setLevel(
                getattr(logging,
                        kwargs.get('logging').upper()))

    def on_message(self, msg):
        """
        Callback when a message is received on the backend.
        It parses and posts the message on the main bus.
        It should be called by the derived classes whenever
        a new message should be processed.

        :param msg: Received message. It can be either a key-value dictionary, a platypush.message.Message object,
            or a UTF-8 encoded JSON string/bytes payload
        """

        msg = Message.build(msg)

        if not getattr(msg, 'target', None) or msg.target != self.device_id:
            return  # Not for me

        self.logger.debug('Message received on the {} backend: {}'.format(
            self.__class__.__name__, msg))

        if self._is_expected_response(msg):
            # Expected response, trigger the response handler
            clear_timeout()
            self._request_context['on_response'](msg)
            self.stop()
            return

        if isinstance(msg, StopEvent) and msg.targets_me():
            self.logger.info('Received STOP event on {}'.format(
                self.__class__.__name__))
            self._should_stop = True
        else:
            msg.backend = self  # Augment message to be able to process responses
            self.bus.post(msg)

    def _is_expected_response(self, msg):
        """ Internal only - returns true if we are expecting for a response
            and msg is that response """

        return self._request_context \
            and isinstance(msg, Response) \
            and msg.id == self._request_context['request'].id

    def _get_backend_config(self):
        config_name = 'backend.' + self.__class__.__name__.split(
            'Backend')[0].lower()
        return Config.get(config_name)

    def _setup_response_handler(self, request, on_response, response_timeout):
        def _timeout_hndl():
            raise RuntimeError(
                'Timed out while waiting for a response from {}'.format(
                    request.target))

        req_ctx = {
            'request': request,
            'on_response': on_response,
            'response_timeout': response_timeout,
        }

        resp_backend = self.__class__(bus=self.bus,
                                      _req_ctx=req_ctx,
                                      **self._get_backend_config(),
                                      **self._kwargs)

        # Set the response timeout
        if response_timeout:
            set_timeout(seconds=response_timeout, on_timeout=_timeout_hndl)

        resp_backend.start()

    def send_event(self, event, **kwargs):
        """
        Send an event message on the backend.

        :param event: Event to send. It can be a dict, a string/bytes UTF-8 JSON, or a platypush.message.event.Event
            object.
        """

        event = Event.build(event)
        assert isinstance(event, Event)

        event.origin = self.device_id
        if not hasattr(event, 'target'):
            event.target = self.device_id

        self.send_message(event, **kwargs)

    def send_request(self,
                     request,
                     on_response=None,
                     response_timeout=_default_response_timeout,
                     **kwargs):
        """
        Send a request message on the backend.

        :param request: The request, either a dict, a string/bytes UTF-8 JSON, or a platypush.message.request.Request
            object.

        :param on_response: Optional callback that will be called when a response is received. If set, this method will
            synchronously wait for a response before exiting.
        :type on_response: function

        :param response_timeout: If on_response is set, the backend will raise an exception if the response isn't
            received within this number of seconds (default: 5 seconds)
        :type response_timeout: float
        """

        request = Request.build(request)
        assert isinstance(request, Request)

        request.origin = self.device_id

        if on_response and response_timeout != 0:
            self._setup_response_handler(request, on_response,
                                         response_timeout)

        self.send_message(request, **kwargs)

    def send_response(self, response, request, **kwargs):
        """
        Send a response message on the backend.

        :param response: The response, either a dict, a string/bytes UTF-8 JSON, or a
            :class:`platypush.message.response.Response` object.
        :param request: Associated request, used to set the response parameters that will link them
        """

        assert isinstance(response, Response)
        assert isinstance(request, Request)

        self.send_message(response, **kwargs)

    def send_message(self, msg, queue_name=None, **kwargs):
        """
        Sends a platypush.message.Message to a node.
        To be implemented in the derived classes. By default, if the Redis
        backend is configured then it will try to deliver the message to
        other consumers through the configured Redis main queue.

        :param msg: The message to send
        :param queue_name: Send the message on a specific queue (default: the queue_name configured on the Redis
            backend)
        """

        try:
            redis = get_backend('redis')
            if not redis:
                raise KeyError()
        except KeyError:
            self.logger.warning(
                "Backend %s does not implement send_message and the "
                "fallback Redis backend isn't configured",
                self.__class__.__name__)
            return

        redis.send_message(msg, queue_name=queue_name)

    def run(self):
        """ Starts the backend thread. To be implemented in the derived classes if the loop method isn't defined. """
        self.thread_id = threading.get_ident()
        set_thread_name(self._thread_name)
        if not callable(self.loop):
            return

        while not self.should_stop():
            try:
                with self:
                    has_error = False

                    while not self.should_stop() and not has_error:
                        try:
                            self.loop()
                        except Exception as e:
                            has_error = True
                            self.logger.error(str(e))
                            self.logger.exception(e)
                        finally:
                            if self.poll_seconds:
                                time.sleep(self.poll_seconds)
                            elif has_error:
                                time.sleep(5)
            except Exception as e:
                self.logger.error('{} initialization error: {}'.format(
                    self.__class__.__name__, str(e)))
                self.logger.exception(e)
                time.sleep(self.poll_seconds or 5)

    def __enter__(self):
        """ Invoked when the backend is initialized, if the main logic is within a ``loop()`` function """
        self.logger.info('Initialized backend {}'.format(
            self.__class__.__name__))

    def __exit__(self, exc_type, exc_val, exc_tb):
        """ Invoked when the backend is terminated, if the main logic is within a ``loop()`` function """
        self.on_stop()
        self.logger.info('Terminated backend {}'.format(
            self.__class__.__name__))

    def on_stop(self):
        """ Callback invoked when the process stops """
        self.unregister_service()

    def stop(self):
        """ Stops the backend thread by sending a STOP event on its bus """
        def _async_stop():
            evt = StopEvent(target=self.device_id,
                            origin=self.device_id,
                            thread_id=self.thread_id)

            self.send_message(evt)
            self._stop_event.set()
            self.on_stop()

        Thread(target=_async_stop).start()

    def should_stop(self):
        return self._should_stop

    def wait_stop(self, timeout=None) -> bool:
        return self._stop_event.wait(timeout)

    def _get_redis(self):
        import redis

        redis_backend = get_backend('redis')
        if not redis_backend:
            self.logger.warning(
                'Redis backend not configured - some ' +
                'web server features may not be working properly')
            redis_args = {}
        else:
            redis_args = redis_backend.redis_args

        redis = redis.Redis(**redis_args)
        return redis

    def get_message_response(self, msg):
        try:
            redis = self._get_redis()
            response = redis.blpop(get_redis_queue_name_by_message(msg),
                                   timeout=60)
            if response and len(response) > 1:
                response = Message.build(response[1])
            else:
                response = None

            return response
        except Exception as e:
            self.logger.error(
                'Error while processing response to {}: {}'.format(
                    msg, str(e)))

    @staticmethod
    def _get_ip() -> str:
        """
        Get the primary outbound IP address of the machine.
        """
        # Connecting a UDP socket sends no packets; it just selects the
        # outbound interface whose address getsockname() then reports.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(('8.8.8.8', 80))
        addr = s.getsockname()[0]
        s.close()
        return addr

    def register_service(self,
                         port: Optional[int] = None,
                         name: Optional[str] = None,
                         udp: bool = False):
        """
        Initialize the Zeroconf service configuration for this backend.
        """
        try:
            from zeroconf import ServiceInfo, Zeroconf
            from platypush.plugins.zeroconf import ZeroconfListener
        except ImportError:
            self.logger.warning(
                'zeroconf package not available, service discovery will be disabled.'
            )
            return

        self.zeroconf = Zeroconf()
        srv_desc = {
            'name': 'Platypush',
            'vendor': 'Platypush',
            'version': __version__,
        }

        name = name or re.sub(r'Backend$', '', self.__class__.__name__).lower()
        srv_type = '_platypush-{name}._{proto}.local.'.format(
            name=name, proto='udp' if udp else 'tcp')
        srv_name = '{host}.{type}'.format(host=self.device_id, type=srv_type)

        if port:
            srv_port = port
        else:
            srv_port = self.port if hasattr(self, 'port') else None

        self.zeroconf_info = ServiceInfo(
            srv_type,
            srv_name,
            addresses=[socket.inet_aton(self._get_ip())],
            port=srv_port,
            weight=0,
            priority=0,
            properties=srv_desc)

        self.zeroconf.register_service(self.zeroconf_info)
        self.bus.post(
            ZeroconfServiceAddedEvent(
                service_type=srv_type,
                service_name=srv_name,
                service_info=ZeroconfListener.parse_service_info(
                    self.zeroconf_info)))

    def unregister_service(self):
        """
        Unregister the Zeroconf service configuration if available.
        """
        if self.zeroconf and self.zeroconf_info:
            self.zeroconf.unregister_service(self.zeroconf_info)
            if self.zeroconf:
                self.zeroconf.close()

            if self.zeroconf_info:
                self.bus.post(
                    ZeroconfServiceRemovedEvent(
                        service_type=self.zeroconf_info.type,
                        service_name=self.zeroconf_info.name))
            else:
                self.bus.post(
                    ZeroconfServiceRemovedEvent(service_type=None,
                                                service_name=None))

            self.zeroconf_info = None
            self.zeroconf = None
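
register_service() and unregister_service() above are thin wrappers around the stock zeroconf register/unregister lifecycle. A condensed standalone sketch of that lifecycle; the service type, name, address and port are placeholders:

import socket

from zeroconf import ServiceInfo, Zeroconf

zc = Zeroconf()
info = ServiceInfo(
    '_platypush-http._tcp.local.',
    'my-host._platypush-http._tcp.local.',
    addresses=[socket.inet_aton('192.168.1.10')],  # placeholder address
    port=8008,                                     # placeholder port
    weight=0,
    priority=0,
    properties={'name': 'Platypush', 'vendor': 'Platypush'})

zc.register_service(info)
try:
    input('Service registered, press ENTER to unregister...')
finally:
    zc.unregister_service(info)
    zc.close()
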
Ejemplo n.º 34
0
class WebThingServer:
    """Server to represent a Web Thing over HTTP."""
    def __init__(self,
                 things,
                 port=80,
                 hostname=None,
                 ssl_options=None,
                 additional_routes=None,
                 base_path=''):
        """
        Initialize the WebThingServer.

        For documentation on the additional route format, see:
        https://www.tornadoweb.org/en/stable/web.html#tornado.web.Application

        things -- things managed by this server -- should be of type
                  SingleThing or MultipleThings
        port -- port to listen on (defaults to 80)
        hostname -- Optional host name, e.g. mything.com
        ssl_options -- dict of SSL options to pass to the tornado server
        additional_routes -- list of additional routes to add to the server
        base_path -- base URL path to use, rather than '/'
        """
        self.things = things
        self.name = things.get_name()
        self.port = port
        self.hostname = hostname
        self.base_path = base_path.rstrip('/')

        system_hostname = socket.gethostname().lower()
        self.hosts = [
            'localhost',
            'localhost:{}'.format(self.port),
            '{}.local'.format(system_hostname),
            '{}.local:{}'.format(system_hostname, self.port),
        ]

        for address in get_addresses():
            self.hosts.extend([
                address,
                '{}:{}'.format(address, self.port),
            ])

        if self.hostname is not None:
            self.hostname = self.hostname.lower()
            self.hosts.extend([
                self.hostname,
                '{}:{}'.format(self.hostname, self.port),
            ])

        if isinstance(self.things, MultipleThings):
            for idx, thing in enumerate(self.things.get_things()):
                thing.set_href_prefix('{}/{}'.format(self.base_path, idx))

            handlers = [
                [
                    r'/?',
                    ThingsHandler,
                    dict(things=self.things, hosts=self.hosts),
                ],
                [
                    r'/(?P<thing_id>\d+)/?',
                    ThingHandler,
                    dict(things=self.things, hosts=self.hosts),
                ],
                [
                    r'/(?P<thing_id>\d+)/properties/?',
                    PropertiesHandler,
                    dict(things=self.things, hosts=self.hosts),
                ],
                [
                    r'/(?P<thing_id>\d+)/properties/' +
                    r'(?P<property_name>[^/]+)/?',
                    PropertyHandler,
                    dict(things=self.things, hosts=self.hosts),
                ],
                [
                    r'/(?P<thing_id>\d+)/actions/?',
                    ActionsHandler,
                    dict(things=self.things, hosts=self.hosts),
                ],
                [
                    r'/(?P<thing_id>\d+)/actions/(?P<action_name>[^/]+)/?',
                    ActionHandler,
                    dict(things=self.things, hosts=self.hosts),
                ],
                [
                    r'/(?P<thing_id>\d+)/actions/' +
                    r'(?P<action_name>[^/]+)/(?P<action_id>[^/]+)/?',
                    ActionIDHandler,
                    dict(things=self.things, hosts=self.hosts),
                ],
                [
                    r'/(?P<thing_id>\d+)/events/?',
                    EventsHandler,
                    dict(things=self.things, hosts=self.hosts),
                ],
                [
                    r'/(?P<thing_id>\d+)/events/(?P<event_name>[^/]+)/?',
                    EventHandler,
                    dict(things=self.things, hosts=self.hosts),
                ],
            ]
        else:
            self.things.get_thing().set_href_prefix(self.base_path)
            handlers = [
                [
                    r'/?',
                    ThingHandler,
                    dict(things=self.things, hosts=self.hosts),
                ],
                [
                    r'/properties/?',
                    PropertiesHandler,
                    dict(things=self.things, hosts=self.hosts),
                ],
                [
                    r'/properties/(?P<property_name>[^/]+)/?',
                    PropertyHandler,
                    dict(things=self.things, hosts=self.hosts),
                ],
                [
                    r'/actions/?',
                    ActionsHandler,
                    dict(things=self.things, hosts=self.hosts),
                ],
                [
                    r'/actions/(?P<action_name>[^/]+)/?',
                    ActionHandler,
                    dict(things=self.things, hosts=self.hosts),
                ],
                [
                    r'/actions/(?P<action_name>[^/]+)/(?P<action_id>[^/]+)/?',
                    ActionIDHandler,
                    dict(things=self.things, hosts=self.hosts),
                ],
                [
                    r'/events/?',
                    EventsHandler,
                    dict(things=self.things, hosts=self.hosts),
                ],
                [
                    r'/events/(?P<event_name>[^/]+)/?',
                    EventHandler,
                    dict(things=self.things, hosts=self.hosts),
                ],
            ]

        if isinstance(additional_routes, list):
            handlers = additional_routes + handlers

        if self.base_path:
            for h in handlers:
                h[0] = self.base_path + h[0]

        self.app = tornado.web.Application(handlers)
        self.app.is_tls = ssl_options is not None
        self.server = tornado.httpserver.HTTPServer(self.app,
                                                    ssl_options=ssl_options)

    def start(self):
        """Start listening for incoming connections."""
        self.service_info = ServiceInfo(
            '_webthing._tcp.local.',
            '{}._webthing._tcp.local.'.format(self.name),
            address=socket.inet_aton(get_ip()),
            port=self.port,
            properties={
                'path': '/',
            },
            server='{}.local.'.format(socket.gethostname()))
        self.zeroconf = Zeroconf()
        self.zeroconf.register_service(self.service_info)

        self.server.listen(self.port)
        tornado.ioloop.IOLoop.current().start()

    def stop(self):
        """Stop listening."""
        self.zeroconf.unregister_service(self.service_info)
        self.zeroconf.close()
        self.server.stop()
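
One way to wire start() and stop() together so that the _webthing._tcp.local. record is withdrawn even when the Tornado loop is interrupted (a sketch only; make_thing() is a placeholder for building a SingleThing and is not shown here):

server = WebThingServer(make_thing(), port=8888)  # make_thing() is a placeholder
try:
    # Registers the mDNS record and blocks in the IOLoop.
    server.start()
except KeyboardInterrupt:
    pass
finally:
    # Unregisters the record, closes Zeroconf and stops the HTTP server.
    server.stop()
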
Ejemplo n.º 35
0
import atexit
import json
import random
import socket
from typing import cast

from zeroconf import ServiceBrowser, ServiceStateChange, Zeroconf, ServiceInfo, NonUniqueNameException

from mxdc import Object, Signal
from mxdc.utils.log import get_module_logger
from mxdc.utils.misc import get_address

# get logging object
logger = get_module_logger(__name__)

ZCONF = Zeroconf()

Collision = NonUniqueNameException


class SimpleProvider(object):
    """
    Multi-cast DNS Service Provider

    Provide a multicast DNS service with the given name and type listening on the given
    port with additional information in the data record.

    :param name: Name of service
    :param service_type: Service Type string
    :param port: Service port
Ejemplo n.º 36
0
            print("  Server: %s" % (info.server, ))
            if info.properties:
                print("  Properties are:")
                for key, value in info.properties.items():
                    print("    %s: %s" % (key, value))
            else:
                print("  No properties")
        else:
            print("  No info")
        print('\n')


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    if len(sys.argv) > 1:
        assert sys.argv[1:] == ['--debug']
        logging.getLogger('zeroconf').setLevel(logging.DEBUG)

    zeroconf = Zeroconf()
    print("\nBrowsing services, press Ctrl-C to exit...\n")
    browser = ServiceBrowser(zeroconf,
                             "_http._tcp.local.",
                             handlers=[on_service_state_change])

    try:
        while True:
            sleep(0.1)
    except KeyboardInterrupt:
        pass
    finally:
        zeroconf.close()
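
Only the tail of on_service_state_change appears above; the head of such a handler typically resolves the service before printing the details shown. A purely illustrative sketch of that missing part (not the original author's code):

def on_service_state_change(zeroconf, service_type, name, state_change):
    # Assumes `socket` and `ServiceStateChange` are imported at the top of
    # the (truncated) example above.
    print("Service %s of type %s state changed: %s" % (name, service_type, state_change))
    if state_change is ServiceStateChange.Added:
        info = zeroconf.get_service_info(service_type, name)
        if info:
            # Newer python-zeroconf exposes `info.addresses`; older
            # releases had a single packed `info.address` instead.
            print("  Address: %s:%d" % (socket.inet_ntoa(info.addresses[0]), info.port))
            # ... the property printing shown above follows from here ...
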
Ejemplo n.º 37
0
    def add_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
        logger = self.logger.getChild(name.split('.', 2)[0])
        with self.lock:
            info = zeroconf.get_service_info(type, name)
            logger.debug(
                'WGServiceListener add_service %s', type)
            if not info:
                logger.warning('Missing info')
                return
            if not WGServiceInfo.authenticate(info, self.psk):
                logger.warning('Failed to authenticate remote with psk hash')
                return
            props: Dict[bytes, bytes] = info.properties
            addrs: List[TIfaceAddress] = [
                ipaddress.ip_address(addr)
                for addr in info.addresses
            ]
            _internal_addr = props.get(b'addr', b'').decode('utf-8')
            internal_addr = None
            if _internal_addr:
                internal_addr = ipaddress.ip_interface(_internal_addr)
            pubkey = props.get(b'pubkey', b'').decode('utf-8')
            hostname = props.get(b'hostname', b'').decode('utf-8')
            if not internal_addr or not pubkey:
                logger.warning('Service does not have requisite properties')
                return
            if internal_addr.ip == self.my_address.ip:
                logger.warning('Service has the same internal IP address as ours')
                return
            if internal_addr.ip not in self.my_prefix:
                logger.warning('Service address is not within our prefix')
                return
            logger.info(
                'Found remote. name "%s" pubkey "%s" addrs %s port %d',
                name,
                pubkey,
                addrs,
                info.port,
            )
            if pubkey in self.peers:
                return
            # if self.peers.get(pubkey, None) == : return
            # if pubkey in self.iface.wg.get_interface(IFACE).peers: return
            for addr in addrs:
                addr = ipaddress.ip_address(addr)
                if addr.is_link_local:
                    continue
                endpoint = addr.compressed
                if addr.version == 6:
                    endpoint = f'[{endpoint}]'
                endpoint = f'{endpoint}:{info.port}'

                (WGProc('set', self.wg_zero.wg_iface.ifname)
                    .args([
                        'peer', pubkey,
                        'preshared-key', '/dev/stdin',
                        'endpoint', endpoint,
                        'persistent-keepalive', '5',
                        'allowed-ips',
                        ','.join([
                            internal_addr.ip.compressed,
                            # Apparently we cannot add the same addr to
                            # multiple peers
                            # self.my_prefix.broadcast_address.compressed,
                        ])
                    ])
                    .input(self.psk)
                    .run())

                self.peers[pubkey] = addr
                zw_hostname = hostname + '.zerowire.'
                self.wg_zero.wg_iface.global_dns.add_addr_record(
                    zw_hostname,
                    internal_addr.ip)
Ejemplo n.º 38
0
def test_known_answer_supression():
    zc = Zeroconf(interfaces=['127.0.0.1'])
    type_ = "_knownservice._tcp.local."
    name = "knownname"
    registration_name = "%s.%s" % (name, type_)
    desc = {'path': '/~paulsm/'}
    server_name = "ash-2.local."
    info = ServiceInfo(type_,
                       registration_name,
                       80,
                       0,
                       0,
                       desc,
                       server_name,
                       addresses=[socket.inet_aton("10.0.1.2")])
    zc.register_service(info)

    now = current_time_millis()
    _clear_cache(zc)
    # Test PTR suppression
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    unicast_out, multicast_out = zc.query_handler.response(
        r.DNSIncoming(packets[0]), "1.2.3.4", const._MDNS_PORT)
    assert unicast_out is None
    assert multicast_out is not None and multicast_out.answers

    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    generated.add_answer_at_time(info.dns_pointer(), now)
    packets = generated.packets()
    unicast_out, multicast_out = zc.query_handler.response(
        r.DNSIncoming(packets[0]), "1.2.3.4", const._MDNS_PORT)
    assert unicast_out is None
    # If the answer is suppressed, the additional records should be suppressed as well
    assert not multicast_out or not multicast_out.answers

    # Test A suppression
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(server_name, const._TYPE_A, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    unicast_out, multicast_out = zc.query_handler.response(
        r.DNSIncoming(packets[0]), "1.2.3.4", const._MDNS_PORT)
    assert unicast_out is None
    assert multicast_out is not None and multicast_out.answers

    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(server_name, const._TYPE_A, const._CLASS_IN)
    generated.add_question(question)
    for dns_address in info.dns_addresses():
        generated.add_answer_at_time(dns_address, now)
    packets = generated.packets()
    unicast_out, multicast_out = zc.query_handler.response(
        r.DNSIncoming(packets[0]), "1.2.3.4", const._MDNS_PORT)
    assert unicast_out is None
    assert not multicast_out or not multicast_out.answers

    # Test SRV suppression
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(registration_name, const._TYPE_SRV,
                             const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    unicast_out, multicast_out = zc.query_handler.response(
        r.DNSIncoming(packets[0]), "1.2.3.4", const._MDNS_PORT)
    assert unicast_out is None
    assert multicast_out is not None and multicast_out.answers

    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(registration_name, const._TYPE_SRV,
                             const._CLASS_IN)
    generated.add_question(question)
    generated.add_answer_at_time(info.dns_service(), now)
    packets = generated.packets()
    unicast_out, multicast_out = zc.query_handler.response(
        r.DNSIncoming(packets[0]), "1.2.3.4", const._MDNS_PORT)
    assert unicast_out is None
    # If the answer is suppressed, the additional records should be suppressed as well
    assert not multicast_out or not multicast_out.answers

    # Test TXT suppression
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(registration_name, const._TYPE_TXT,
                             const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    unicast_out, multicast_out = zc.query_handler.response(
        r.DNSIncoming(packets[0]), "1.2.3.4", const._MDNS_PORT)
    assert unicast_out is None
    assert multicast_out is not None and multicast_out.answers

    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(registration_name, const._TYPE_TXT,
                             const._CLASS_IN)
    generated.add_question(question)
    generated.add_answer_at_time(info.dns_text(), now)
    packets = generated.packets()
    unicast_out, multicast_out = zc.query_handler.response(
        r.DNSIncoming(packets[0]), "1.2.3.4", const._MDNS_PORT)
    assert unicast_out is None
    assert not multicast_out or not multicast_out.answers

    # unregister
    zc.unregister_service(info)
    zc.close()
Ejemplo n.º 39
0
def initialize_zeroconf_listener():
    ZEROCONF_STATE["zeroconf"] = Zeroconf()
    ZEROCONF_STATE["listener"] = KolibriZeroconfListener()
    ZEROCONF_STATE["zeroconf"].add_service_listener(SERVICE_TYPE,
                                                    ZEROCONF_STATE["listener"])
Ejemplo n.º 40
0
async def test_async_with_sync_passed_in() -> None:
    """Test we can create and close the instance when passing in a sync Zeroconf."""
    zc = Zeroconf(interfaces=['127.0.0.1'])
    aiozc = AsyncZeroconf(zc=zc)
    assert aiozc.zeroconf is zc
    await aiozc.async_close()
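
Beyond wrapping an existing sync instance, AsyncZeroconf can also register and unregister services from a coroutine. A small sketch assuming python-zeroconf's asyncio API; the service type, name and port are illustrative:

import asyncio
import socket

from zeroconf import ServiceInfo
from zeroconf.asyncio import AsyncZeroconf


async def main():
    aiozc = AsyncZeroconf()
    info = ServiceInfo(
        '_demo._tcp.local.',
        'demo-host._demo._tcp.local.',
        addresses=[socket.inet_aton('127.0.0.1')],
        port=9000,
        properties={'path': '/'})
    await aiozc.async_register_service(info)
    await asyncio.sleep(1)
    await aiozc.async_unregister_service(info)
    await aiozc.async_close()


asyncio.run(main())
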
Ejemplo n.º 41
0
    initial_motor_state = np.random.randint(0, 100 + 1)
    print("Initial motor state: %s" % initial_motor_state)
    save_motor_value(None, [None, motor_values], initial_motor_state)

    desc = {'actuator1': '/motor:0%100'}

    info = ServiceInfo(type_="_osc._udp.local.",
                       name="PythonActuator._osc._udp.local.",
                       address=socket.inet_aton(get_local_ip()),
                       port=3335,
                       weight=0,
                       priority=0,
                       properties=desc,
                       server="PythonActuator.local.")

    zeroconf = Zeroconf()
    print("Registration of a service PythonActuator")
    zeroconf.register_service(info)

    print("Opening a TCP connection")
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print(get_local_ip())
    s.bind((str(get_local_ip()), 5555))
    s.listen()

    conn, addr = s.accept()
    print("Connection address:  " + str(addr))

    while True:
        data = conn.recv(20)
        if not data:
Ejemplo n.º 42
0
    def _zeroconf_handler(self, zeroconf: Zeroconf, service_type: str,
                          name: str, state_change: ServiceStateChange):
        if state_change == ServiceStateChange.Removed:
            _LOGGER.debug(f"Zeroconf Removed: {name}")
            # TTL of record 5 minutes
            deviceid = name[8:18]
            # _LOGGER.debug(f"{deviceid} <= Local2 | Zeroconf Removed Event")
            # check if device added
            if 'handlers' in self._devices[deviceid]:
                coro = self.check_offline(deviceid)
                self.loop.create_task(coro)
            return

        try:
            info = zeroconf.get_service_info(service_type, name)
            properties = {
                k.decode(): v.decode() if isinstance(v, bytes) else v
                for k, v in info.properties.items()
            }

            deviceid = properties['id']
            device = self._devices.setdefault(deviceid, {})

            log = f"{deviceid} <= Local{state_change.value}"

            if properties.get('encrypt'):
                devicekey = device.get('devicekey')
                if devicekey == 'skip':
                    return
                if not devicekey:
                    _LOGGER.info(f"{log} | No devicekey for device")
                    # skip device next time
                    device['devicekey'] = 'skip'
                    return

                data = decrypt(properties, devicekey)
                # Fix Sonoff RF Bridge syntax bug
                if data and data.startswith(b'{"rf'):
                    data = data.replace(b'"="', b'":"')
            else:
                data = ''.join([
                    properties[f'data{i}'] for i in range(1, 5, 1)
                    if f'data{i}' in properties
                ])

            try:
                state = json.loads(data)
            except:
                _LOGGER.debug(f"{log} !! Wrong JSON data: {data}")
                return
            seq = properties.get('seq')

            _LOGGER.debug(f"{log} | {state} | {seq}")

            # https://github.com/AlexxIT/SonoffLAN/issues/527
            if 'currentTemperature' in state:
                try:
                    state['temperature'] = float(state['currentTemperature'])
                except ValueError:
                    pass
            if 'currentHumidity' in state:
                try:
                    state['humidity'] = float(state['currentHumidity'])
                except ValueError:
                    pass

            # TH bug in local mode https://github.com/AlexxIT/SonoffLAN/issues/110
            if state.get('temperature') == 0 and state.get('humidity') == 0:
                del state['temperature'], state['humidity']

            if 'temperature' in state and self.sync_temperature:
                # cloud API send only one decimal (not round)
                state['temperature'] = int(state['temperature'] * 10) / 10.0

            if properties['type'] == 'fan_light':
                state = ifan03to02(state)
                device['uiid'] = 'fan_light'

            host = str(ipaddress.ip_address(info.addresses[0]))
            # update every time the device host changes (also the first time)
            if device.get('host') != host:
                # connection state used for attribute updates
                state['local'] = 'online'
                # device host for local connections
                device['host'] = host
                # update or set device init state
                if 'params' in device:
                    device['params'].update(state)
                else:
                    device['params'] = state
                    # set uiid with: strip, plug, light, rf
                    device['uiid'] = properties['type']

            for handler in self._handlers:
                handler(deviceid, state, seq)

        except:
            _LOGGER.debug(
                f"Problem while processing zeroconf: {service_type}, {name}")
Ejemplo n.º 43
0
class DiscoveryPanel(wx.Panel, listmix.ColumnSorterMixin):

    def _init_coll_MainSizer_Items(self, parent):
        parent.AddWindow(self.staticText1,    0, border=20, flag=wx.TOP | wx.LEFT | wx.RIGHT | wx.GROW)
        parent.AddWindow(self.ServicesList,   0, border=20, flag=wx.LEFT | wx.RIGHT | wx.GROW)
        parent.AddSizer(self.ButtonGridSizer, 0, border=20, flag=wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.GROW)

    def _init_coll_MainSizer_Growables(self, parent):
        parent.AddGrowableCol(0)
        parent.AddGrowableRow(1)

    def _init_coll_ButtonGridSizer_Items(self, parent):
        parent.AddWindow(self.RefreshButton, 0, border=0, flag=0)
        # parent.AddWindow(self.ByIPCheck, 0, border=0, flag=0)

    def _init_coll_ButtonGridSizer_Growables(self, parent):
        parent.AddGrowableCol(0)
        parent.AddGrowableRow(0)

    def _init_sizers(self):
        self.MainSizer = wx.FlexGridSizer(cols=1, hgap=0, rows=3, vgap=10)
        self.ButtonGridSizer = wx.FlexGridSizer(cols=2, hgap=5, rows=1, vgap=0)

        self._init_coll_MainSizer_Items(self.MainSizer)
        self._init_coll_MainSizer_Growables(self.MainSizer)
        self._init_coll_ButtonGridSizer_Items(self.ButtonGridSizer)
        self._init_coll_ButtonGridSizer_Growables(self.ButtonGridSizer)

        self.SetSizer(self.MainSizer)

    def _init_list_ctrl(self):
        # Set up list control
        listID = wx.NewId()
        self.ServicesList = AutoWidthListCtrl(
            id=listID,
            name='ServicesList', parent=self, pos=wx.Point(0, 0), size=wx.Size(0, 0),
            style=wx.LC_REPORT | wx.LC_EDIT_LABELS | wx.LC_SORT_ASCENDING | wx.LC_SINGLE_SEL)
        self.ServicesList.InsertColumn(0, _('NAME'))
        self.ServicesList.InsertColumn(1, _('TYPE'))
        self.ServicesList.InsertColumn(2, _('IP'))
        self.ServicesList.InsertColumn(3, _('PORT'))
        self.ServicesList.SetColumnWidth(0, 150)
        self.ServicesList.SetColumnWidth(1, 150)
        self.ServicesList.SetColumnWidth(2, 150)
        self.ServicesList.SetColumnWidth(3, 150)
        self.ServicesList.SetInitialSize(wx.Size(-1, 300))
        self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected, id=listID)
        self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnItemActivated, id=listID)

    def _init_ctrls(self, prnt):
        self.staticText1 = wx.StaticText(
            label=_('Services available:'), name='staticText1', parent=self,
            pos=wx.Point(0, 0), size=wx.DefaultSize, style=0)

        self.RefreshButton = wx.Button(
            label=_('Refresh'), name='RefreshButton', parent=self,
            pos=wx.Point(0, 0), size=wx.DefaultSize, style=0)
        self.RefreshButton.Bind(wx.EVT_BUTTON, self.OnRefreshButton)

        # self.ByIPCheck = wx.CheckBox(self, label=_("Use IP instead of Service Name"))
        # self.ByIPCheck.SetValue(True)

        self._init_sizers()
        self.Fit()

    def __init__(self, parent):
        wx.Panel.__init__(self, parent)

        self.parent = parent

        self._init_list_ctrl()
        listmix.ColumnSorterMixin.__init__(self, 4)

        self._init_ctrls(parent)

        self.itemDataMap = {}
        self.nextItemId = 0

        self.URI = None
        self.Browser = None
        self.ZeroConfInstance = None

        self.RefreshList()
        self.LatestSelection = None

        self.IfacesMonitorState = None
        self.IfacesMonitorTimer = wx.Timer(self)
        self.IfacesMonitorTimer.Start(2000)
        self.Bind(wx.EVT_TIMER, self.IfacesMonitor, self.IfacesMonitorTimer)

    def __del__(self):
        self.IfacesMonitorTimer.Stop()
        self.Browser.cancel()
        self.ZeroConfInstance.close()

    def IfacesMonitor(self, event):
        NewState = get_all_addresses(socket.AF_INET)

        if self.IfacesMonitorState != NewState:
            if self.IfacesMonitorState is not None:
                # refresh only if a new address appeared
                for addr in NewState:
                    if addr not in self.IfacesMonitorState:
                        self.RefreshList()
                        break
            self.IfacesMonitorState = NewState
        event.Skip()

    def RefreshList(self):
        self.ServicesList.DeleteAllItems()
        if self.Browser is not None:
            self.Browser.cancel()
        if self.ZeroConfInstance is not None:
            self.ZeroConfInstance.close()
        self.ZeroConfInstance = Zeroconf()
        self.Browser = ServiceBrowser(self.ZeroConfInstance, service_type, self)

    def OnRefreshButton(self, event):
        self.RefreshList()

    # Used by the ColumnSorterMixin, see wx/lib/mixins/listctrl.py
    def GetListCtrl(self):
        return self.ServicesList

    def getColumnText(self, index, col):
        item = self.ServicesList.GetItem(index, col)
        return item.GetText()

    def OnItemSelected(self, event):
        self.SetURI(event.m_itemIndex)
        event.Skip()

    def OnItemActivated(self, event):
        self.SetURI(event.m_itemIndex)
        self.parent.EndModal(wx.ID_OK)
        event.Skip()

#    def SetURI(self, idx):
#        connect_type = self.getColumnText(idx, 1)
#        connect_address = self.getColumnText(idx, 2)
#        connect_port = self.getColumnText(idx, 3)
#
#        self.URI = "%s://%s:%s"%(connect_type, connect_address, connect_port)

    def SetURI(self, idx):
        self.LatestSelection = idx

    def GetURI(self):
        if self.LatestSelection is not None:
            # if self.ByIPCheck.IsChecked():
            svcname, scheme, host, port = \
                map(lambda col: self.getColumnText(self.LatestSelection, col),
                    range(4))
            return ("%s://%s:%s#%s" % (scheme, host, port, svcname)) \
                if scheme[-1] == "S" \
                else ("%s://%s:%s" % (scheme, host, port))
            # else:
            #     svcname = self.getColumnText(self.LatestSelection, 0)
            #     connect_type = self.getColumnText(self.LatestSelection, 1)
            #     return str("MDNS://%s" % svcname)
        return None

    def remove_service(self, zeroconf, _type, name):
        wx.CallAfter(self._removeService, name)

    def _removeService(self, name):
        '''
        called when a service with the desired type goes offline.
        '''

        # loop through the list items looking for the service that went offline
        for idx in xrange(self.ServicesList.GetItemCount()):
            # this is the unique identifier assigned to the item
            item_id = self.ServicesList.GetItemData(idx)

            # this is the full typename that was received by addService
            item_name = self.itemDataMap[item_id][4]

            if item_name == name:
                self.ServicesList.DeleteItem(idx)
                break

    def add_service(self, zeroconf, _type, name):
        wx.CallAfter(self._addService, _type, name)

    def _addService(self, _type, name):
        '''
        called when a service with the desired type is discovered.
        '''
        info = self.ZeroConfInstance.get_service_info(_type, name)
        if info is None:
            return
        svcname = name.split(".")[0]
        typename = info.properties.get("protocol", None)
        ip = str(socket.inet_ntoa(info.address))
        port = info.port

        num_items = self.ServicesList.GetItemCount()

        # display the new data in the list
        new_item = self.ServicesList.InsertStringItem(num_items, svcname)
        self.ServicesList.SetStringItem(new_item, 1, "%s" % typename)
        self.ServicesList.SetStringItem(new_item, 2, "%s" % ip)
        self.ServicesList.SetStringItem(new_item, 3, "%s" % port)

        # record the new data for the ColumnSorterMixin
        # we assign every list item a unique id (that won't change when items
        # are added or removed)
        self.ServicesList.SetItemData(new_item, self.nextItemId)

        # the value of each column has to be stored in the itemDataMap
        # so that ColumnSorterMixin knows how to sort the column.

        # "name" is included at the end so that self.removeService
        # can access it.
        self.itemDataMap[self.nextItemId] = [svcname, typename, ip, port, name]

        self.nextItemId += 1
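
ServiceBrowser delivers add_service()/remove_service() callbacks on zeroconf's own worker thread, which is why the panel above defers every UI update through wx.CallAfter. Outside of wx the same hand-off is often done with a plain queue; a sketch:

import queue

events = queue.Queue()


class QueueingListener:
    """Collects zeroconf callbacks so the main thread can drain them safely."""

    def add_service(self, zeroconf, service_type, name):
        events.put(('added', service_type, name))

    def remove_service(self, zeroconf, service_type, name):
        events.put(('removed', service_type, name))

    def update_service(self, zeroconf, service_type, name):
        pass

# The main/UI thread then polls events.get() at its own pace.
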
Ejemplo n.º 44
0
async def appstart(loop):  # pylint: disable=too-many-branches,too-many-statements
    """Script starts here."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--local-ip", default="127.0.0.1", help="local IP address")
    parser.add_argument(
        "--demo", default=False, action="store_true", help="enable demo mode"
    )
    parser.add_argument(
        "-d", "--debug", default=False, action="store_true", help="enable debug logs"
    )

    protocols = parser.add_argument_group("protocols")
    protocols.add_argument(
        "--mrp", default=False, action="store_true", help="enable MRP protocol"
    )
    protocols.add_argument(
        "--dmap", default=False, action="store_true", help="enable DMAP protocol"
    )
    protocols.add_argument(
        "--airplay", default=False, action="store_true", help="enable AirPlay protocol"
    )
    protocols.add_argument(
        "--companion",
        default=False,
        action="store_true",
        help="enable Companion protocol",
    )
    protocols.add_argument(
        "--raop",
        default=False,
        action="store_true",
        help="enable RAOP protocol",
    )
    args = parser.parse_args()

    if not (args.mrp or args.dmap or args.airplay or args.companion or args.raop):
        parser.error("no protocol enabled (see --help)")

    level = logging.DEBUG if args.debug else logging.WARNING
    logging.basicConfig(
        level=level,
        stream=sys.stdout,
        datefmt="%Y-%m-%d %H:%M:%S",
        format="%(asctime)s %(levelname)s: %(message)s",
    )

    tasks = []
    unpublishers = []
    zconf = Zeroconf()
    fake_atv = FakeAppleTV(loop, test_mode=False)
    if args.mrp:
        _, usecase = fake_atv.add_service(Protocol.MRP)
        if args.demo:
            tasks.append(asyncio.ensure_future(_alter_playing(usecase)))

    if args.dmap:
        _, usecase = fake_atv.add_service(
            Protocol.DMAP, hsgid=HSGID, pairing_guid=PAIRING_GUID, session_id=SESSION_ID
        )
        if args.demo:
            tasks.append(asyncio.ensure_future(_alter_playing(usecase)))

    if args.airplay:
        fake_atv.add_service(Protocol.AirPlay)

    if args.companion:
        fake_atv.add_service(Protocol.Companion)

    if args.raop:
        fake_atv.add_service(Protocol.RAOP)

    await fake_atv.start()

    if args.mrp:
        unpublishers.append(
            await publish_mrp_zeroconf(
                loop, zconf, args.local_ip, fake_atv.get_port(Protocol.MRP)
            )
        )

    if args.dmap:
        unpublishers.append(
            await publish_dmap_zeroconf(
                loop, zconf, args.local_ip, fake_atv.get_port(Protocol.DMAP)
            )
        )

    if args.airplay:
        unpublishers.append(
            await publish_airplay_zeroconf(
                loop, zconf, args.local_ip, fake_atv.get_port(Protocol.AirPlay)
            )
        )

    if args.companion:
        unpublishers.append(
            await publish_companion_zeroconf(
                loop, zconf, args.local_ip, fake_atv.get_port(Protocol.Companion)
            )
        )

    if args.raop:
        unpublishers.append(
            await publish_raop_zeroconf(
                loop, zconf, args.local_ip, fake_atv.get_port(Protocol.RAOP)
            )
        )

    print("Press ENTER to quit")
    await loop.run_in_executor(None, sys.stdin.readline)

    await fake_atv.stop()

    for task in tasks:
        task.cancel()

    for unpublisher in unpublishers:
        await unpublisher()

    print("Exiting")

    return 0
Ejemplo n.º 45
0
def setup(hass, config):
    """Set up Zeroconf and make Home Assistant discoverable."""
    zeroconf = Zeroconf()
    zeroconf_name = f"{hass.config.location_name}.{ZEROCONF_TYPE}"

    params = {
        "version": __version__,
        "base_url": hass.config.api.base_url,
        # Always needs authentication
        "requires_api_password": True,
    }

    host_ip = util.get_local_ip()

    try:
        host_ip_pton = socket.inet_pton(socket.AF_INET, host_ip)
    except OSError:
        host_ip_pton = socket.inet_pton(socket.AF_INET6, host_ip)

    info = ServiceInfo(
        ZEROCONF_TYPE,
        zeroconf_name,
        None,
        addresses=[host_ip_pton],
        port=hass.http.server_port,
        properties=params,
    )

    def zeroconf_hass_start(_event):
        """Expose Home Assistant on zeroconf when it starts.

        Wait until Home Assistant has started, otherwise HTTP is not up and running yet.
        """
        _LOGGER.info("Starting Zeroconf broadcast")
        try:
            zeroconf.register_service(info)
        except NonUniqueNameException:
            _LOGGER.error(
                "Home Assistant instance with identical name present in the local network"
            )

    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, zeroconf_hass_start)

    def service_update(zeroconf, service_type, name, state_change):
        """Service state changed."""
        if state_change != ServiceStateChange.Added:
            return

        service_info = zeroconf.get_service_info(service_type, name)
        info = info_from_service(service_info)
        _LOGGER.debug("Discovered new device %s %s", name, info)

        # If we can handle it as a HomeKit discovery, we do that here.
        if service_type == HOMEKIT_TYPE and handle_homekit(hass, info):
            return

        for domain in ZEROCONF[service_type]:
            hass.add_job(
                hass.config_entries.flow.async_init(domain,
                                                    context={"source": DOMAIN},
                                                    data=info))

    for service in ZEROCONF:
        ServiceBrowser(zeroconf, service, handlers=[service_update])

    if HOMEKIT_TYPE not in ZEROCONF:
        ServiceBrowser(zeroconf, HOMEKIT_TYPE, handlers=[service_update])

    def stop_zeroconf(_):
        """Stop Zeroconf."""
        zeroconf.close()

    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_zeroconf)

    return True
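
The inet_pton() fallback above is a compact way of packing either address family before handing the bytes to ServiceInfo; isolated, the pattern looks like this:

import socket


def pack_ip(host_ip: str) -> bytes:
    """Pack an IPv4 address, falling back to IPv6 on failure."""
    try:
        return socket.inet_pton(socket.AF_INET, host_ip)
    except OSError:
        return socket.inet_pton(socket.AF_INET6, host_ip)


print(pack_ip('192.168.0.5'))
print(pack_ip('2001:db8::1'))
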
Ejemplo n.º 46
0
class AccessoryDriver:
    """
    An AccessoryDriver mediates between incoming requests from the HAPServer and
    the Accessory.

    The driver starts and stops the HAPServer, the mDNS advertisements and responds
    to events from the HAPServer.
    """

    NUM_EVENTS_BEFORE_STATS = 100
    """Number of HAP send events to be processed before reporting statistics on
    the event queue length."""
    def __init__(self,
                 *,
                 address=None,
                 port=51234,
                 persist_file='accessory.state',
                 pincode=None,
                 encoder=None,
                 loader=None,
                 loop=None,
                 mac=None,
                 listen_address=None,
                 advertised_address=None,
                 interface_choice=None):
        """
        Initialize a new AccessoryDriver object.

        :param pincode: The pincode that HAP clients must prove they know in order
            to pair with this `Accessory`. Defaults to None, in which case a random
            pincode is generated. The pincode has the format "xxx-xx-xxx", where x is
            a digit.
        :type pincode: bytearray

        :param port: The local port on which the accessory will be accessible.
            In other words, this is the port of the HAPServer.
        :type port: int

        :param address: The local address on which the accessory will be accessible.
            In other words, this is the address of the HAPServer. If not given, the
            driver will try to select an address.
        :type address: str

        :param persist_file: The file name in which the state of the accessory
            will be persisted. This uses `expandvars`, so may contain `~` to
            refer to the user's home directory.
        :type persist_file: str

        :param encoder: The encoder to use when persisting/loading the Accessory state.
        :type encoder: AccessoryEncoder

        :param mac: The MAC address which will be used to identify the accessory.
            If not given, the driver will try to select a MAC address.
        :type mac: str

        :param listen_address: The local address on which the HAPServer will listen.
            If not given, the value of the address parameter will be used.
        :type listen_address: str

        :param advertised_address: The address of the HAPServer announced via mDNS.
            This can be used to announce an external address from behind a NAT.
            If not given, the value of the address parameter will be used.
        :type advertised_address: str

        :param interface_choice: The zeroconf interfaces to listen on.
        :type interface_choice: InterfaceChoice (InterfaceChoice.Default or InterfaceChoice.All)
        """
        if sys.platform == 'win32':
            self.loop = loop or asyncio.ProactorEventLoop()
        else:
            self.loop = loop or asyncio.new_event_loop()

        executor_opts = {'max_workers': None}
        if sys.version_info >= (3, 6):
            executor_opts['thread_name_prefix'] = 'SyncWorker'

        self.executor = ThreadPoolExecutor(**executor_opts)
        self.loop.set_default_executor(self.executor)

        self.accessory = None
        self.http_server_thread = None
        if interface_choice is not None:
            self.advertiser = Zeroconf(interfaces=interface_choice)
        else:
            self.advertiser = Zeroconf()
        self.persist_file = os.path.expanduser(persist_file)
        self.encoder = encoder or AccessoryEncoder()
        self.topics = {}  # topic: set of (address, port) of subscribed clients
        self.topic_lock = threading.Lock(
        )  # for exclusive access to the topics
        self.loader = loader or Loader()
        self.aio_stop_event = asyncio.Event(loop=self.loop)
        self.stop_event = threading.Event()
        self.event_queue = queue.Queue()  # (topic, bytes)
        self.send_event_thread = None  # the event dispatch thread
        self.sent_events = 0
        self.accumulated_qsize = 0

        self.safe_mode = False

        self.mdns_service_info = None
        self.srp_verifier = None

        address = address or util.get_local_address()
        advertised_address = advertised_address or address
        self.state = State(address=advertised_address,
                           mac=mac,
                           pincode=pincode,
                           port=port)

        listen_address = listen_address or address
        network_tuple = (listen_address, self.state.port)
        self.http_server = HAPServer(network_tuple, self)

    def start(self):
        """Start the event loop and call `_do_start`.

        Pyhap will be stopped gracefully on a KeyboardInterrupt.
        """
        try:
            logger.info('Starting the event loop')
            if threading.current_thread() is threading.main_thread():
                logger.debug('Setting child watcher')
                watcher = asyncio.SafeChildWatcher()
                watcher.attach_loop(self.loop)
                asyncio.set_child_watcher(watcher)
            else:
                logger.debug(
                    'Not setting a child watcher. Set one if '
                    'subprocesses will be started outside the main thread.')
            self.add_job(self._do_start)
            self.loop.run_forever()
        except KeyboardInterrupt:
            logger.debug('Got a KeyboardInterrupt, stopping driver')
            self.loop.call_soon_threadsafe(self.loop.create_task,
                                           self.async_stop())
            self.loop.run_forever()
        finally:
            self.loop.close()
            logger.info('Closed the event loop')

    def _do_start(self):
        """Starts the accessory.

        - Call the accessory's run method.
        - Start handling accessory events.
        - Start the HAP server.
        - Publish a mDNS advertisement.
        - Print the setup QR code if the accessory is not paired.

        All of the above are started in separate threads. Accessory thread is set as
        daemon.
        """
        if self.accessory is None:
            raise ValueError("You must assign an accessory to the driver, "
                             "before you can start it.")
        logger.info('Starting accessory %s on address %s, port %s.',
                    self.accessory.display_name, self.state.address,
                    self.state.port)

        # Start sending events to clients. This is done in a daemon thread, because:
        # - if the thread is blocked waiting on an empty queue, there is nothing left
        #   to clean up.
        # - if the thread is currently sending an event to a client, then, once it has
        #   finished, it will check the stop condition, see that it is set and break the
        #   loop. Alternatively, the server's server_close method will shut down and
        #   close the socket while sending is in progress, which will abort the sending.
        logger.debug('Starting event thread.')
        self.send_event_thread = threading.Thread(daemon=True,
                                                  target=self.send_events)
        self.send_event_thread.start()

        # Start listening for requests
        logger.debug('Starting server.')
        self.http_server_thread = threading.Thread(
            target=self.http_server.serve_forever)
        self.http_server_thread.start()

        # Advertise the accessory as a mDNS service.
        logger.debug('Starting mDNS.')
        self.mdns_service_info = AccessoryMDNSServiceInfo(
            self.accessory, self.state)
        self.advertiser.register_service(self.mdns_service_info)

        # Print accessory setup message
        if not self.state.paired:
            self.accessory.setup_message()

        # Start the accessory so it can do stuff.
        logger.debug('Starting accessory.')
        self.add_job(self.accessory.run)
        logger.debug('AccessoryDriver started successfully')

    def stop(self):
        """Method to stop pyhap."""
        self.loop.call_soon_threadsafe(self.loop.create_task,
                                       self.async_stop())

    async def async_stop(self):
        """Stops the AccessoryDriver and shutdown all remaining tasks."""
        await self.async_add_job(self._do_stop)
        logger.debug('Shutdown executors')
        self.executor.shutdown()
        self.loop.stop()
        logger.debug('Stop completed')

    def _do_stop(self):
        """Stop the accessory.

        1. Set the run sentinel.
        2. Call the stop method of the Accessory and wait for its thread to finish.
        3. Stop mDNS advertising.
        4. Stop HAP server.
        """
        # TODO: This should happen in a different order - mDNS, server, accessory. Need
        # to ensure that sending with a closed server will not crash the app.
        logger.info("Stopping accessory %s on address %s, port %s.",
                    self.accessory.display_name, self.state.address,
                    self.state.port)
        logger.debug(
            "Setting stop events, stopping accessory and event sending")
        self.stop_event.set()
        self.loop.call_soon_threadsafe(self.aio_stop_event.set)
        self.add_job(self.accessory.stop)

        logger.debug("Stopping mDNS advertising")
        self.advertiser.unregister_service(self.mdns_service_info)
        self.advertiser.close()

        logger.debug("Stopping HAP server")
        self.http_server.shutdown()
        self.http_server.server_close()
        self.http_server_thread.join()

        logger.debug("AccessoryDriver stopped successfully")

    def add_job(self, target, *args):
        """Add job to executor pool."""
        if target is None:
            raise ValueError("Don't call add_job with None.")
        self.loop.call_soon_threadsafe(self.async_add_job, target, *args)

    @callback
    def async_add_job(self, target, *args):
        """Add job from within the event loop."""
        task = None

        if asyncio.iscoroutine(target):
            task = self.loop.create_task(target)
        elif is_callback(target):
            self.loop.call_soon(target, *args)
        elif iscoro(target):
            task = self.loop.create_task(target(*args))
        else:
            task = self.loop.run_in_executor(None, target, *args)

        return task

    @callback
    def async_run_job(self, target, *args):
        """Run job from within the event loop.

        In contrast to `async_add_job`, callbacks get called immediately.
        """
        if not asyncio.iscoroutine(target) and is_callback(target):
            target(*args)
        else:
            self.async_add_job(target, *args)

    def add_accessory(self, accessory):
        """Add top level accessory to driver."""
        self.accessory = accessory
        if accessory.aid is None:
            accessory.aid = STANDALONE_AID
        elif accessory.aid != STANDALONE_AID:
            raise ValueError("Top-level accessory must have the AID == 1.")
        if os.path.exists(self.persist_file):
            logger.info("Loading Accessory state from `%s`", self.persist_file)
            self.load()
        else:
            logger.info("Storing Accessory state in `%s`", self.persist_file)
            self.persist()

    def subscribe_client_topic(self, client, topic, subscribe=True):
        """(Un)Subscribe the given client from the given topic, thread-safe.

        :param client: A client (address, port) tuple that should be subscribed.
        :type client: tuple <str, int>

        :param topic: The topic to which to subscribe.
        :type topic: str

        :param subscribe: Whether to subscribe or unsubscribe the client. Both subscribing
            an already subscribed client and unsubscribing a client that is not subscribed
            do nothing.
        :type subscribe: bool
        """
        with self.topic_lock:
            if subscribe:
                subscribed_clients = self.topics.get(topic)
                if subscribed_clients is None:
                    subscribed_clients = set()
                    self.topics[topic] = subscribed_clients
                subscribed_clients.add(client)
            else:
                if topic not in self.topics:
                    return
                subscribed_clients = self.topics[topic]
                subscribed_clients.discard(client)
                if not subscribed_clients:
                    del self.topics[topic]

    def publish(self, data, sender_client_addr=None):
        """Publishes an event to the client.

        The publishing occurs only if there are clients subscribed to the topic for
        the aid and iid contained in the data.

        :param data: The data to publish. It must at least contain the keys "aid" and
            "iid".
        :type data: dict

        :param sender_client_addr: The (address, port) of the client that triggered the
            change; that client is skipped when the event is sent out.
        :type sender_client_addr: tuple
        """
        topic = get_topic(data[HAP_REPR_AID], data[HAP_REPR_IID])
        if topic not in self.topics:
            return

        data = {HAP_REPR_CHARS: [data]}
        bytedata = json.dumps(data).encode()
        self.event_queue.put((topic, bytedata, sender_client_addr))

    def send_events(self):
        """Start sending events from the queue to clients.

        This continues until the event loop is closed. The method logs the average
        queue size for the past NUM_EVENTS_BEFORE_STATS events. Enable debug logging to
        see this information.

        Whenever sending an event fails (i.e. HAPServer.push_event returns False), the
        intended client is removed from the set of subscribed clients for the topic.

        @note: This method blocks on Queue.get, waiting for something to come. Thus, if
        this is not run in a daemon thread or it is run on the main thread, the app will
        hang.
        """
        while not self.loop.is_closed():
            # Maybe consider having a pool of worker threads, each performing a send in
            # order to increase throughput.
            #
            # Clients that made the characteristic change are NOT supposed to get events
            # about that change, as this can cause an HTTP disconnect and violates
            # the HAP spec.
            #
            topic, bytedata, sender_client_addr = self.event_queue.get()
            subscribed_clients = self.topics.get(topic, [])
            logger.debug(
                'Send event: topic(%s), data(%s), sender_client_addr(%s)',
                topic, bytedata, sender_client_addr)
            for client_addr in subscribed_clients.copy():
                if sender_client_addr and sender_client_addr == client_addr:
                    logger.debug(
                        "Skip sending event to client since it's the client that made the characteristic change: %s",
                        client_addr)
                    continue
                logger.debug('Sending event to client: %s', client_addr)
                pushed = self.http_server.push_event(bytedata, client_addr)
                if not pushed:
                    logger.debug(
                        'Could not send event to %s, probably stale socket.',
                        client_addr)
                    # Maybe consider removing the client_addr from every topic?
                    self.subscribe_client_topic(client_addr, topic, False)
            self.event_queue.task_done()
            self.sent_events += 1
            self.accumulated_qsize += self.event_queue.qsize()

            if self.sent_events > self.NUM_EVENTS_BEFORE_STATS:
                logger.debug('Average queue size for the past %s events: %.2f',
                             self.sent_events,
                             self.accumulated_qsize / self.sent_events)
                self.sent_events = 0
                self.accumulated_qsize = 0

    def config_changed(self):
        """Notify the driver that the accessory's configuration has changed.

        Persists the accessory, so that the new configuration is available on
        restart. Also, updates the mDNS advertisement, so that iOS clients know they need
        to fetch new data.
        """
        self.state.config_version += 1
        self.persist()
        self.update_advertisement()

    def update_advertisement(self):
        """Updates the mDNS service info for the accessory."""
        self.advertiser.unregister_service(self.mdns_service_info)
        self.mdns_service_info = AccessoryMDNSServiceInfo(
            self.accessory, self.state)
        time.sleep(0.1)  # Doing it right away can cause crashes.
        self.advertiser.register_service(self.mdns_service_info)

    def persist(self):
        """Saves the state of the accessory."""
        with open(self.persist_file, 'w') as fp:
            self.encoder.persist(fp, self.state)

    def load(self):
        """ """
        with open(self.persist_file, 'r') as fp:
            self.encoder.load_into(fp, self.state)

    def pair(self, client_uuid, client_public):
        """Called when a client has paired with the accessory.

        Persist the new accessory state.

        :param client_uuid: The client uuid.
        :type client_uuid: uuid.UUID

        :param client_public: The client's public key.
        :type client_public: bytes

        :return: Whether the pairing is successful.
        :rtype: bool
        """
        # TODO: Adding a client is a change in the acc. configuration. Then, should we
        # let the accessory call config_changed, which will persist and update mDNS?
        # See also unpair.
        logger.info("Paired with %s.", client_uuid)
        self.state.add_paired_client(client_uuid, client_public)
        self.persist()
        return True

    def unpair(self, client_uuid):
        """Removes the paired client from the accessory.

        Persist the new accessory state.

        :param client_uuid: The client uuid.
        :type client_uuid: uuid.UUID
        """
        logger.info("Unpairing client %s.", client_uuid)
        self.state.remove_paired_client(client_uuid)
        self.persist()

    def finish_pair(self):
        """Finishing pairing or unpairing.

        Updates the accessory and updates the mDNS service.

        The mDNS announcement must not be updated until AFTER
        the final pairing response is sent, or HomeKit will
        see that the accessory is already paired and assume
        it should stop pairing.
        """
        # Safe mode added to avoid error during pairing, see
        # https://github.com/home-assistant/home-assistant/issues/14567
        #
        # This may no longer be needed now that we defer
        # updating the advertisement until after the final
        # pairing response is sent.
        #
        if not self.safe_mode:
            self.update_advertisement()

    def setup_srp_verifier(self):
        """Create an SRP verifier for the accessory's info."""
        # TODO: Move the below hard-coded values somewhere nice.
        ctx = get_srp_context(3072, hashlib.sha512, 16)
        verifier = SrpServer(ctx, b'Pair-Setup', self.state.pincode)
        self.srp_verifier = verifier

    def get_accessories(self):
        """Returns the accessory in HAP format.

        :return: An example HAP representation is:

        .. code-block:: python

           {
              "accessories": [{
                 "aid": 1,
                 "services": [{
                    "iid": 1,
                    "type": ...,
                    "characteristics": [{
                       "iid": 2,
                       "type": ...,
                       "description": "CurrentTemperature",
                       ...
                    }]
                 }]
              }]
           }

        :rtype: dict
        """
        hap_rep = self.accessory.to_HAP()
        if not isinstance(hap_rep, list):
            hap_rep = [
                hap_rep,
            ]
        logger.debug("Get accessories response: %s", hap_rep)
        return {HAP_REPR_ACCS: hap_rep}

    def get_characteristics(self, char_ids):
        """Returns values for the required characteristics.

        :param char_ids: A list of characteristic "paths", e.g. "1.2" is aid 1, iid 2.
        :type char_ids: list<str>

        :return: Status success for each required characteristic. For example:

        .. code-block:: python

           {
              "characteristics": [{
                 "aid": 1,
                 "iid": 2,
                 "status": 0
              }]
           }

        :rtype: dict
        """
        chars = []
        for id in char_ids:
            aid, iid = (int(i) for i in id.split('.'))
            rep = {HAP_REPR_AID: aid, HAP_REPR_IID: iid}
            char = self.accessory.get_characteristic(aid, iid)
            try:
                rep[HAP_REPR_VALUE] = char.get_value()
                rep[HAP_REPR_STATUS] = CHAR_STAT_OK
            except CharacteristicError:
                logger.error("Error getting value for characteristic %s.", id)
                rep[HAP_REPR_STATUS] = SERVICE_COMMUNICATION_FAILURE

            chars.append(rep)
        logger.debug("Get chars response: %s", chars)
        return {HAP_REPR_CHARS: chars}

    def set_characteristics(self, chars_query, client_addr):
        """Called from ``HAPServerHandler`` when iOS configures the characteristics.

        :param chars_query: A configuration query. For example:

        .. code-block:: python

           {
              "characteristics": [{
                 "aid": 1,
                 "iid": 2,
                 "value": False, # Value to set
                 "ev": True # (Un)subscribe for events from this characteristics.
              }]
           }

        :type chars_query: dict
        """
        # TODO: Add support for chars that do not support notifications.
        service_callbacks = {}
        for cq in chars_query[HAP_REPR_CHARS]:
            aid, iid = cq[HAP_REPR_AID], cq[HAP_REPR_IID]
            char = self.accessory.get_characteristic(aid, iid)

            if HAP_PERMISSION_NOTIFY in cq:
                char_topic = get_topic(aid, iid)
                logger.debug("Subscribed client %s to topic %s", client_addr,
                             char_topic)
                self.subscribe_client_topic(client_addr, char_topic,
                                            cq[HAP_PERMISSION_NOTIFY])

            if HAP_REPR_VALUE in cq:
                # TODO: status needs to be based on success of set_value
                char.client_update_value(cq[HAP_REPR_VALUE], client_addr)
                # For some services we want to send all the char value
                # changes at once.  This resolves an issue where we send
                # ON and then BRIGHTNESS and the light would go to 100%
                # and then dim to the brightness because each callback
                # would only send one char at a time.
                service = char.service

                if service and service.setter_callback:
                    service_name = service.display_name
                    service_callbacks.setdefault(aid, {})
                    service_callbacks[aid].setdefault(
                        service_name, [service.setter_callback, {}])
                    service_callbacks[aid][service_name][
                        SERVICE_CALLBACK_DATA][
                            char.display_name] = cq[HAP_REPR_VALUE]

        for aid in service_callbacks:
            for service_name in service_callbacks[aid]:
                service_callbacks[aid][service_name][SERVICE_CALLBACK](
                    service_callbacks[aid][service_name]
                    [SERVICE_CALLBACK_DATA])

    def signal_handler(self, _signal, _frame):
        """Stops the AccessoryDriver for a given signal.

        An AccessoryDriver can be registered as a signal handler with this method. For
        example, you can register it for a KeyboardInterrupt as follows:
        >>> import signal
        >>> signal.signal(signal.SIGINT, anAccDriver.signal_handler)

        Now, when the user hits Ctrl+C, the driver will stop its accessory, the HAP server
        and everything else that needs stopping and will exit gracefully.
        """
        try:
            self.stop()
        except Exception as e:
            logger.error("Could not stop AccessoryDriver because of error: %s",
                         e)
            raise
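For orientation, a minimal usage sketch of the driver above. The Accessory import path, the subclass, and the service and characteristic names are assumptions based on typical HAP-python layouts, not taken from this snippet:

import signal

from pyhap.accessory import Accessory          # assumed import path
from pyhap.accessory_driver import AccessoryDriver


class TemperatureSensor(Accessory):
    """A toy accessory; 'TemperatureSensor' is an assumed preload service name."""

    def __init__(self, driver, display_name):
        super().__init__(driver, display_name)
        service = self.add_preload_service('TemperatureSensor')
        self.char_temp = service.get_characteristic('CurrentTemperature')


driver = AccessoryDriver(port=51826, persist_file='sensor.state')
driver.add_accessory(TemperatureSensor(driver, 'My Sensor'))

# Reuse the driver's own handler so SIGTERM stops everything cleanly;
# a KeyboardInterrupt is already handled inside start().
signal.signal(signal.SIGTERM, driver.signal_handler)
driver.start()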
Ejemplo n.º 47
0
    def setup(self):
        try:
            f = open(token_path)
            self.token = f.read()
            print('read token', self.token)
            f.close()
        except Exception as e:
            print('signalk failed to read token', token_path)
            self.token = False

        try:
            from zeroconf import ServiceBrowser, ServiceStateChange, Zeroconf
        except Exception as e:
            if not self.missingzeroconfwarned:
                print(
                    'signalk: failed to import zeroconf, autodetection not possible'
                )
                print(
                    'try pip3 install zeroconf or apt install python3-zeroconf'
                )
                self.missingzeroconfwarned = True
            time.sleep(20)
            return

        self.last_values = {}
        self.signalk_msgs = {}
        self.signalk_msgs_skip = {}

        self.period = self.client.register(
            RangeProperty('signalk.period', .5, .1, 2, persistent=True))

        self.signalk_host_port = False
        self.signalk_ws_url = False
        self.ws = False

        class Listener:
            def __init__(self, signalk):
                self.signalk = signalk
                self.name_type = False

            def remove_service(self, zeroconf, type, name):
                print('zeroconf service removed', name, type)
                if self.name_type == (name, type):
                    self.signalk.signalk_host_port = False
                    self.signalk.disconnect_signalk()
                    print('signalk server lost')

            def add_service(self, zeroconf, type, name):
                print('zeroconf service add', name, type)
                self.name_type = name, type
                info = zeroconf.get_service_info(type, name)
                if not info:
                    return
                properties = {}
                for name, value in info.properties.items():
                    properties[name.decode()] = value.decode()
                if properties['swname'] == 'signalk-server':
                    try:
                        host_port = socket.inet_ntoa(
                            info.addresses[0]) + ':' + str(info.port)
                    except Exception as e:
                        host_port = socket.inet_ntoa(info.address) + ':' + str(
                            info.port)
                    self.signalk.signalk_host_port = host_port
                    print('signalk server found', host_port)

        zeroconf = Zeroconf()
        listener = Listener(self)
        browser = ServiceBrowser(zeroconf, "_http._tcp.local.", listener)
        #zeroconf.close()
        self.initialized = True
Ejemplo n.º 48
0
Archivo: ota.py Proyecto: yfhe/espurna
                        help="sort devices list by field",
                        default='hostname')
    parser.add_argument("-y",
                        "--yes",
                        help="do not ask for confirmation",
                        default=0,
                        action='count')
    parser.add_argument("hostnames", nargs='*', help="Hostnames to update")
    args = parser.parse_args()

    print()
    print(description)
    print()

    # Look for services
    zeroconf = Zeroconf()
    browser = ServiceBrowser(zeroconf,
                             "_arduino._tcp.local.",
                             handlers=[on_service_state_change])
    discover_last = time.time()
    while time.time() < discover_last + DISCOVER_TIMEOUT:
        time.sleep(0.2)  # give the browser time to collect services without busy-waiting
    #zeroconf.close()

    if len(devices) == 0:
        print("Nothing found!\n")
        sys.exit(0)

    # Sort list
    field = args.sort.lower()
    if field not in devices[0]:
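The fragment registers `handlers=[on_service_state_change]` but does not show the handler itself. A sketch of what such a handler typically looks like follows; the `devices` list and the decoded TXT keys are assumptions, and `info.addresses` assumes a recent python-zeroconf:

import socket

from zeroconf import ServiceStateChange

devices = []  # filled by the handler, read once the discovery window ends


def on_service_state_change(zeroconf, service_type, name, state_change):
    # Only react to newly added services; ignore updates and removals.
    if state_change is not ServiceStateChange.Added:
        return
    info = zeroconf.get_service_info(service_type, name)
    if not info:
        return
    device = {
        'hostname': name.split('.')[0],
        'ip': socket.inet_ntoa(info.addresses[0]),
        'port': info.port,
    }
    # TXT records arrive as bytes (values may be None for flag-style keys).
    device.update({
        key.decode(): value.decode()
        for key, value in info.properties.items() if value is not None
    })
    devices.append(device)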
Ejemplo n.º 49
0
 def start(self):
     """
     Start the service
     """
     http_info = ServiceInfo('_http._tcp.local.', 'OpenLP._http._tcp.local.',
                             address=self.address, port=self.http_port, properties={})
     ws_info = ServiceInfo('_ws._tcp.local.', 'OpenLP._ws._tcp.local.',
                           address=self.address, port=self.ws_port, properties={})
     zc = Zeroconf()
     zc.register_service(http_info)
     zc.register_service(ws_info)
     self._can_run = True
     while self.can_run():
         sleep(0.1)
     zc.unregister_service(http_info)
     zc.unregister_service(ws_info)
     zc.close()
     self.quit.emit()
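The `ServiceInfo(address=..., port=...)` keywords above belong to older python-zeroconf releases; newer releases drop the single `address` argument in favour of `addresses=[...]` (a list of packed addresses). A sketch of the same registration against the newer signature, with placeholder address and ports:

import socket
from time import sleep

from zeroconf import ServiceInfo, Zeroconf

http_info = ServiceInfo(
    '_http._tcp.local.',
    'OpenLP._http._tcp.local.',
    addresses=[socket.inet_aton('192.168.1.10')],  # placeholder address
    port=4316,                                     # placeholder port
    properties={},
)
ws_info = ServiceInfo(
    '_ws._tcp.local.',
    'OpenLP._ws._tcp.local.',
    addresses=[socket.inet_aton('192.168.1.10')],
    port=4317,                                     # placeholder port
    properties={},
)

zc = Zeroconf()
zc.register_service(http_info)
zc.register_service(ws_info)
try:
    sleep(60)  # stand-in for the service's run loop
finally:
    zc.unregister_service(http_info)
    zc.unregister_service(ws_info)
    zc.close()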
Ejemplo n.º 50
0
 def test_add_wrong_state(self):
     """Test early return on wrong state changes."""
     with patch("devolo_plc_api.network.Zeroconf.get_service_info") as gsi:
         network._add({}, Zeroconf(), SERVICE_TYPE, SERVICE_TYPE,
                      ServiceStateChange.Removed)
         assert gsi.call_count == 0
Ejemplo n.º 51
0
def test_qu_response():
    """Handle multicast incoming with the QU bit set."""
    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=['127.0.0.1'])

    # service definition
    type_ = "_test-srvc-type._tcp.local."
    other_type_ = "_notthesame._tcp.local."
    name = "xxxyyy"
    registration_name = "%s.%s" % (name, type_)
    registration_name2 = "%s.%s" % (name, other_type_)
    desc = {'path': '/~paulsm/'}
    info = ServiceInfo(type_,
                       registration_name,
                       80,
                       0,
                       0,
                       desc,
                       "ash-2.local.",
                       addresses=[socket.inet_aton("10.0.1.2")])
    info2 = ServiceInfo(
        other_type_,
        registration_name2,
        80,
        0,
        0,
        desc,
        "ash-other.local.",
        addresses=[socket.inet_aton("10.0.4.2")],
    )
    # register
    zc.register_service(info)

    def _validate_complete_response(query, out):
        assert out.id == query.id
        has_srv = has_txt = has_a = False
        nbr_additionals = 0
        nbr_answers = len(out.answers)
        nbr_authorities = len(out.authorities)
        for answer in out.additionals:
            nbr_additionals += 1
            if answer.type == const._TYPE_SRV:
                has_srv = True
            elif answer.type == const._TYPE_TXT:
                has_txt = True
            elif answer.type == const._TYPE_A:
                has_a = True
        assert nbr_answers == 1 and nbr_additionals == 3 and nbr_authorities == 0
        assert has_srv and has_txt and has_a

    # With QU should respond to only unicast when the answer has been recently multicast
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY | const._FLAGS_AA)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unique = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)

    unicast_out, multicast_out = zc.query_handler.response(
        r.DNSIncoming(query.packets()[0]), "1.2.3.4", const._MDNS_PORT)
    assert multicast_out is None
    _validate_complete_response(query, unicast_out)

    _clear_cache(zc)
    # With QU should respond to only multicast since the answer hasn't been multicast within the last 75% of the TTL
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY | const._FLAGS_AA)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unique = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)
    unicast_out, multicast_out = zc.query_handler.response(
        r.DNSIncoming(query.packets()[0]), "1.2.3.4", const._MDNS_PORT)
    assert unicast_out is None
    _validate_complete_response(query, multicast_out)

    # With QU set and an authoritative answer (probe), should respond to both unicast and multicast since the answer hasn't been multicast within the last 75% of the TTL
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY | const._FLAGS_AA)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unique = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)
    query.add_authorative_answer(info2.dns_pointer())
    unicast_out, multicast_out = zc.query_handler.response(
        r.DNSIncoming(query.packets()[0]), "1.2.3.4", const._MDNS_PORT)
    _validate_complete_response(query, unicast_out)
    _validate_complete_response(query, multicast_out)

    _inject_response(zc, r.DNSIncoming(multicast_out.packets()[0]))
    # With the cache repopulated, should respond to only unicast when the answer has been recently multicast
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY | const._FLAGS_AA)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unique = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)
    unicast_out, multicast_out = zc.query_handler.response(
        r.DNSIncoming(query.packets()[0]), "1.2.3.4", const._MDNS_PORT)
    assert multicast_out is None
    _validate_complete_response(query, unicast_out)
    # unregister
    zc.unregister_service(info)
    zc.close()
Ejemplo n.º 52
0
def main():

    try:
        # Register the ethoscope using zeroconf so that the node knows about it.
        # I need an address to register the service, but I don't understand which one (different
        # interfaces will have different addresses). The python module zeroconf fails if I don't
        # provide one, and the way it gets supplied doesn't appear to be IPv6 compatible. I'll put
        # in whatever I get from "gethostbyname" but not trust that in the code on the node side.

        # we include the machine-id together with the hostname to make sure each device is really unique
        # moreover, we will burn the ETHOSCOPE_000 img with a non existing /etc/machine-id file
        # to make sure each burned image will get a unique machine-id at the first boot

        hostname = socket.gethostname()
        uid = "%s-%s" % (hostname, get_machine_id())

        logging.warning("Waiting for a network connection")
        address = False
        while address is False:
            try:
                #address = socket.gethostbyname(hostname+".local")
                address = socket.gethostbyname(hostname)
                #this returns something like '192.168.1.4' - when both connected, ethernet IP has priority over wifi IP
            except:
                pass
                #address = socket.gethostbyname(hostname)
                #this returns '127.0.1.1' and it is useless

        logging.info(f"Registering service at PORT {PORT}")
        serviceInfo = ServiceInfo("_ethoscope._tcp.local.",
                                  uid + "._ethoscope._tcp.local.",
                                  address=socket.inet_aton(address),
                                  port=PORT,
                                  properties={
                                      'version': '0.0.1',
                                      'id_page': '/id',
                                      'user_options_page': '/user_options',
                                      'static_page': '/static',
                                      'controls_page': '/controls'
                                  })
        zeroconf = Zeroconf()
        zeroconf.register_service(serviceInfo)
        logging.info('Device registered')

    except OSError as e:
        logging.error(e)
        logging.error(traceback.format_exc())
        for i in range(9):
            time.sleep(10)
            return main()

    except Exception as e:
        try:
            logging.info('Device not registered')
            logging.error(e)
            logging.error(traceback.format_exc())
            zeroconf.unregister_service(serviceInfo)
            zeroconf.close()
        except Exception:
            pass

        close(1)

    finally:
        try:
            zeroconf.unregister_service(serviceInfo)
            zeroconf.close()
        except:
            pass
        close()
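Stripped of the retry and shutdown scaffolding, the registration lifecycle the function above aims for is register, run, then unregister and close. A compact sketch with placeholder port and properties; `addresses=[...]` assumes a recent python-zeroconf (older releases use `address=` as in the snippet above):

import socket
import time

from zeroconf import ServiceInfo, Zeroconf

PORT = 9000  # placeholder
hostname = socket.gethostname()
info = ServiceInfo(
    '_ethoscope._tcp.local.',
    '%s._ethoscope._tcp.local.' % hostname,
    addresses=[socket.inet_aton(socket.gethostbyname(hostname))],
    port=PORT,
    properties={'version': '0.0.1', 'id_page': '/id'},
)

zc = Zeroconf()
zc.register_service(info)
try:
    time.sleep(60)  # stand-in for the device's web server loop
finally:
    zc.unregister_service(info)
    zc.close()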
Ejemplo n.º 53
0
from pymongo import MongoClient
from functools import wraps
from flask import Flask, request, Response, jsonify
from zeroconf import Zeroconf
import socket
import pickle
import requests
from canvas_token import authentication


collection = None
client = None

r = Zeroconf()

# Setup for the canvas API stuff here
auth = authentication()
canvas_key = auth.getcanvas_key()
canvas_url = auth.getcanvas_url()
# Set up a session
session = requests.Session()
session.headers = {'Authorization': 'Bearer %s' % canvas_key}


def get_file_from_canvas(download_filename):
    params = (('sort=name'))
    session.headers = {'Authorization': 'Bearer %s' % canvas_key}
    r = session.get(canvas_url + '?' + 'only[]=names', params=params)
    r.raise_for_status()
    r = r.json()
Ejemplo n.º 54
0
 def on_service_state_change(zeroconf: Zeroconf, service_type: str,
                             name: str, state_change: ServiceStateChange):
     if state_change is ServiceStateChange.Added:
         info = zeroconf.get_service_info(service_type, name)
         if info and info.port != 80:  # ignore the actual amplipi service on your network
             services_advertised[name] = info
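The callback above only fires once it is attached to a browser. A wiring sketch follows, assuming the callback is defined at module level as shown and that `services_advertised` is a module-level dict; the `_amplipi._tcp.local.` service type is an assumption, since the snippet does not show it:

from zeroconf import ServiceBrowser, Zeroconf

services_advertised = {}  # name -> ServiceInfo, filled by the callback above

zc = Zeroconf()
browser = ServiceBrowser(zc, '_amplipi._tcp.local.',
                         handlers=[on_service_state_change])
try:
    input('Browsing, press Enter to stop...\n')
finally:
    zc.close()

for name, info in services_advertised.items():
    print(name, info.port)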
Ejemplo n.º 56
0
    def listen(self, add_device: Callable):
        self._add_device = add_device

        zeroconf = Zeroconf()
        ServiceBrowser(zeroconf, '_yandexio._tcp.local.', listener=self)
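Passing `listener=self` means the surrounding class must implement python-zeroconf's listener protocol (`add_service`, `remove_service`, and on recent releases `update_service`). A minimal standalone sketch of such a listener; the print-only bodies are illustrative:

from zeroconf import ServiceBrowser, Zeroconf


class PrintingListener:
    """Implements the callbacks a ServiceBrowser listener needs."""

    def add_service(self, zeroconf, service_type, name):
        info = zeroconf.get_service_info(service_type, name)
        if info:
            print('found', name, info.port)

    def update_service(self, zeroconf, service_type, name):
        # Expected by recent python-zeroconf releases; no-op here.
        pass

    def remove_service(self, zeroconf, service_type, name):
        print('lost', name)


zc = Zeroconf()
browser = ServiceBrowser(zc, '_yandexio._tcp.local.', listener=PrintingListener())
try:
    input('Browsing, press Enter to stop...\n')
finally:
    zc.close()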
Ejemplo n.º 57
0
class UM3OutputDevicePlugin(OutputDevicePlugin):
    addDeviceSignal = Signal()
    removeDeviceSignal = Signal()
    discoveredDevicesChanged = Signal()

    def __init__(self):
        super().__init__()

        self._zero_conf = None
        self._zero_conf_browser = None

        # Create a cloud output device manager that abstracts all cloud connection logic away.
        self._cloud_output_device_manager = CloudOutputDeviceManager()

        # Because the model needs to be created in the same thread as the QMLEngine, we use a signal.
        self.addDeviceSignal.connect(self._onAddDevice)
        self.removeDeviceSignal.connect(self._onRemoveDevice)

        CuraApplication.getInstance().globalContainerStackChanged.connect(
            self.reCheckConnections)

        self._discovered_devices = {}

        self._network_manager = QNetworkAccessManager()
        self._network_manager.finished.connect(self._onNetworkRequestFinished)

        self._min_cluster_version = Version("4.0.0")

        self._api_version = "1"
        self._api_prefix = "/api/v" + self._api_version + "/"
        self._cluster_api_version = "1"
        self._cluster_api_prefix = "/cluster-api/v" + self._cluster_api_version + "/"

        # Get list of manual instances from preferences
        self._preferences = CuraApplication.getInstance().getPreferences()
        self._preferences.addPreference(
            "um3networkprinting/manual_instances",
            "")  # A comma-separated list of ip adresses or hostnames

        self._manual_instances = self._preferences.getValue(
            "um3networkprinting/manual_instances").split(",")

        # Store the last manual entry key
        self._last_manual_entry_key = ""  # type: str

        # The zero-conf service changed requests are handled in a separate thread, so we can re-schedule the requests
        # which fail to get detailed service info.
        # Any new or re-scheduled requests will be appended to the request queue, and the handling thread will pick
        # them up and process them.
        self._service_changed_request_queue = Queue()
        self._service_changed_request_event = Event()
        self._service_changed_request_thread = Thread(
            target=self._handleOnServiceChangedRequests, daemon=True)
        self._service_changed_request_thread.start()

    def getDiscoveredDevices(self):
        return self._discovered_devices

    def getLastManualDevice(self) -> str:
        return self._last_manual_entry_key

    def resetLastManualDevice(self) -> None:
        self._last_manual_entry_key = ""

    ##  Start looking for devices on network.
    def start(self):
        self.startDiscovery()
        self._cloud_output_device_manager.start()

    def startDiscovery(self):
        self.stop()
        if self._zero_conf_browser:
            self._zero_conf_browser.cancel()
            self._zero_conf_browser = None  # Force the old ServiceBrowser to be destroyed.

        for instance_name in list(self._discovered_devices):
            self._onRemoveDevice(instance_name)

        self._zero_conf = Zeroconf()
        self._zero_conf_browser = ServiceBrowser(
            self._zero_conf, u'_ultimaker._tcp.local.',
            [self._appendServiceChangedRequest])

        # Look for manual instances from preference
        for address in self._manual_instances:
            if address:
                self.addManualDevice(address)
        self.resetLastManualDevice()

    def reCheckConnections(self):
        active_machine = CuraApplication.getInstance().getGlobalContainerStack(
        )
        if not active_machine:
            return

        um_network_key = active_machine.getMetaDataEntry("um_network_key")

        for key in self._discovered_devices:
            if key == um_network_key:
                if not self._discovered_devices[key].isConnected():
                    Logger.log("d", "Attempting to connect with [%s]" % key)
                    # It should already be set, but if it actually connects we know for sure it's supported!
                    active_machine.addConfiguredConnectionType(
                        self._discovered_devices[key].connectionType.value)
                    self._discovered_devices[key].connect()
                    self._discovered_devices[
                        key].connectionStateChanged.connect(
                            self._onDeviceConnectionStateChanged)
                else:
                    self._onDeviceConnectionStateChanged(key)
            else:
                if self._discovered_devices[key].isConnected():
                    Logger.log(
                        "d", "Attempting to close connection with [%s]" % key)
                    self._discovered_devices[key].close()
                    self._discovered_devices[
                        key].connectionStateChanged.disconnect(
                            self._onDeviceConnectionStateChanged)

    def _onDeviceConnectionStateChanged(self, key):
        if key not in self._discovered_devices:
            return
        if self._discovered_devices[key].isConnected():
            # Sometimes the status changes after changing the global container and maybe the device doesn't belong to this machine
            um_network_key = CuraApplication.getInstance(
            ).getGlobalContainerStack().getMetaDataEntry("um_network_key")
            if key == um_network_key:
                self.getOutputDeviceManager().addOutputDevice(
                    self._discovered_devices[key])
        else:
            self.getOutputDeviceManager().removeOutputDevice(key)

    def stop(self):
        if self._zero_conf is not None:
            Logger.log("d", "zeroconf close...")
            self._zero_conf.close()
        self._cloud_output_device_manager.stop()

    def removeManualDevice(self, key, address=None):
        if key in self._discovered_devices:
            if not address:
                address = self._discovered_devices[key].ipAddress
            self._onRemoveDevice(key)
            self.resetLastManualDevice()

        if address in self._manual_instances:
            self._manual_instances.remove(address)
            self._preferences.setValue("um3networkprinting/manual_instances",
                                       ",".join(self._manual_instances))

    def addManualDevice(self, address):
        if address not in self._manual_instances:
            self._manual_instances.append(address)
            self._preferences.setValue("um3networkprinting/manual_instances",
                                       ",".join(self._manual_instances))

        instance_name = "manual:%s" % address
        properties = {
            b"name": address.encode("utf-8"),
            b"address": address.encode("utf-8"),
            b"manual": b"true",
            b"incomplete": b"true",
            b"temporary":
            b"true"  # Still a temporary device until all the info is retrieved in _onNetworkRequestFinished
        }

        if instance_name not in self._discovered_devices:
            # Add a preliminary printer instance
            self._onAddDevice(instance_name, address, properties)
        self._last_manual_entry_key = instance_name

        self._checkManualDevice(address)

    def _checkManualDevice(self, address):
        # Check if a UM3 family device exists at this address.
        # If a printer responds, it will replace the preliminary printer created above
        # origin=manual is for tracking back the origin of the call
        url = QUrl("http://" + address + self._api_prefix + "system")
        name_request = QNetworkRequest(url)
        self._network_manager.get(name_request)

    def _onNetworkRequestFinished(self, reply):
        reply_url = reply.url().toString()

        if "system" in reply_url:
            if reply.attribute(QNetworkRequest.HttpStatusCodeAttribute) != 200:
                # Something went wrong with checking the firmware version!
                return

            try:
                system_info = json.loads(
                    bytes(reply.readAll()).decode("utf-8"))
            except:
                Logger.log("e", "Something went wrong converting the JSON.")
                return

            address = reply.url().host()
            has_cluster_capable_firmware = Version(
                system_info["firmware"]) > self._min_cluster_version
            instance_name = "manual:%s" % address
            properties = {
                b"name": (system_info["name"] + " (manual)").encode("utf-8"),
                b"address": address.encode("utf-8"),
                b"firmware_version": system_info["firmware"].encode("utf-8"),
                b"manual": b"true",
                b"machine":
                str(system_info['hardware']["typeid"]).encode("utf-8")
            }

            if has_cluster_capable_firmware:
                # Cluster needs an additional request, before it's completed.
                properties[b"incomplete"] = b"true"

            # Check if the device is still in the list & re-add it with the updated
            # information.
            if instance_name in self._discovered_devices:
                self._onRemoveDevice(instance_name)
                self._onAddDevice(instance_name, address, properties)

            if has_cluster_capable_firmware:
                # We need to request more info in order to figure out the size of the cluster.
                cluster_url = QUrl("http://" + address +
                                   self._cluster_api_prefix + "printers/")
                cluster_request = QNetworkRequest(cluster_url)
                self._network_manager.get(cluster_request)

        elif "printers" in reply_url:
            if reply.attribute(QNetworkRequest.HttpStatusCodeAttribute) != 200:
                # Something went wrong with checking the amount of printers the cluster has!
                return
            # So we confirmed that the device is in fact a cluster printer, and we should now know how big it is.
            try:
                cluster_printers_list = json.loads(
                    bytes(reply.readAll()).decode("utf-8"))
            except:
                Logger.log("e", "Something went wrong converting the JSON.")
                return
            address = reply.url().host()
            instance_name = "manual:%s" % address
            if instance_name in self._discovered_devices:
                device = self._discovered_devices[instance_name]
                properties = device.getProperties().copy()
                if b"incomplete" in properties:
                    del properties[b"incomplete"]
                properties[b'cluster_size'] = len(cluster_printers_list)
                self._onRemoveDevice(instance_name)
                self._onAddDevice(instance_name, address, properties)

    def _onRemoveDevice(self, device_id):
        device = self._discovered_devices.pop(device_id, None)
        if device:
            if device.isConnected():
                device.disconnect()
                try:
                    device.connectionStateChanged.disconnect(
                        self._onDeviceConnectionStateChanged)
                except TypeError:
                    # Disconnect already happened.
                    pass

            self.discoveredDevicesChanged.emit()

    def _onAddDevice(self, name, address, properties):
        # Check what kind of device we need to add; Depending on the firmware we either add a "Connect"/"Cluster"
        # or "Legacy" UM3 device.
        cluster_size = int(properties.get(b"cluster_size", -1))

        printer_type = properties.get(b"machine", b"").decode("utf-8")
        printer_type_identifiers = {
            "9066": "ultimaker3",
            "9511": "ultimaker3_extended",
            "9051": "ultimaker_s5"
        }

        for key, value in printer_type_identifiers.items():
            if printer_type.startswith(key):
                properties[b"printer_type"] = bytes(value, encoding="utf8")
                break
        else:
            properties[b"printer_type"] = b"Unknown"
        if cluster_size >= 0:
            device = ClusterUM3OutputDevice.ClusterUM3OutputDevice(
                name, address, properties)
        else:
            device = LegacyUM3OutputDevice.LegacyUM3OutputDevice(
                name, address, properties)

        self._discovered_devices[device.getId()] = device
        self.discoveredDevicesChanged.emit()

        global_container_stack = CuraApplication.getInstance(
        ).getGlobalContainerStack()
        if global_container_stack and device.getId(
        ) == global_container_stack.getMetaDataEntry("um_network_key"):
            # Ensure that the configured connection type is set.
            global_container_stack.addConfiguredConnectionType(
                device.connectionType.value)
            device.connect()
            device.connectionStateChanged.connect(
                self._onDeviceConnectionStateChanged)

    ##  Appends a service changed request so later the handling thread will pick it up and processes it.
    def _appendServiceChangedRequest(self, zeroconf, service_type, name,
                                     state_change):
        # append the request and set the event so the event handling thread can pick it up
        item = (zeroconf, service_type, name, state_change)
        self._service_changed_request_queue.put(item)
        self._service_changed_request_event.set()

    def _handleOnServiceChangedRequests(self):
        while True:
            # Wait for the event to be set
            self._service_changed_request_event.wait(timeout=5.0)

            # Stop if the application is shutting down
            if CuraApplication.getInstance().isShuttingDown():
                return

            self._service_changed_request_event.clear()

            # Handle all pending requests
            reschedule_requests = [
            ]  # A list of requests that have failed so later they will get re-scheduled
            while not self._service_changed_request_queue.empty():
                request = self._service_changed_request_queue.get()
                zeroconf, service_type, name, state_change = request
                try:
                    result = self._onServiceChanged(zeroconf, service_type,
                                                    name, state_change)
                    if not result:
                        reschedule_requests.append(request)
                except Exception:
                    Logger.logException(
                        "e",
                        "Failed to get service info for [%s] [%s], the request will be rescheduled",
                        service_type, name)
                    reschedule_requests.append(request)

            # Re-schedule the failed requests if any
            if reschedule_requests:
                for request in reschedule_requests:
                    self._service_changed_request_queue.put(request)

    ##  Handler for zeroConf detection.
    #   Return True or False indicating if the process succeeded.
    #   Note that this function can take over 3 seconds to complete. Be careful
    #   calling it from the main thread.
    def _onServiceChanged(self, zero_conf, service_type, name, state_change):
        if state_change == ServiceStateChange.Added:
            # First try getting info from zero-conf cache
            info = ServiceInfo(service_type, name, properties={})
            for record in zero_conf.cache.entries_with_name(name.lower()):
                info.update_record(zero_conf, time(), record)

            for record in zero_conf.cache.entries_with_name(info.server):
                info.update_record(zero_conf, time(), record)
                if info.address:
                    break

            # Request more data if info is not complete
            if not info.address:
                info = zero_conf.get_service_info(service_type, name)

            if info:
                type_of_device = info.properties.get(b"type", None)
                if type_of_device:
                    if type_of_device == b"printer":
                        address = '.'.join(map(lambda n: str(n), info.address))
                        self.addDeviceSignal.emit(str(name), address,
                                                  info.properties)
                    else:
                        Logger.log(
                            "w",
                            "The type of the found device is '%s', not 'printer'! Ignoring.."
                            % type_of_device)
            else:
                Logger.log("w", "Could not get information about %s" % name)
                return False

        elif state_change == ServiceStateChange.Removed:
            Logger.log("d", "Bonjour service removed: %s" % name)
            self.removeDeviceSignal.emit(str(name))

        return True
Ejemplo n.º 58
0
class OctoPrintOutputDevicePlugin(OutputDevicePlugin):
    def __init__(self):
        super().__init__()
        self._zero_conf = Zeroconf()
        self._browser = None
        self._printers = {}

        # Because the model needs to be created in the same thread as the QMLEngine, we use a signal.
        self.addPrinterSignal.connect(self.addPrinter)
        Application.getInstance().globalContainerStackChanged.connect(
            self.reCheckConnections)

    addPrinterSignal = Signal()

    ##  Start looking for devices on network.
    def start(self):
        self._browser = ServiceBrowser(self._zero_conf,
                                       u'_octoprint._tcp.local.',
                                       [self._onServiceChanged])

    ##  Stop looking for devices on network.
    def stop(self):
        self._zero_conf.close()

    def getPrinters(self):
        return self._printers

    def reCheckConnections(self):
        global_container_stack = Application.getInstance(
        ).getGlobalContainerStack()
        if not global_container_stack:
            return

        for key in self._printers:
            if key == global_container_stack.getMetaDataEntry("octoprint_id"):
                self._printers[key].setApiKey(
                    global_container_stack.getMetaDataEntry(
                        "octoprint_api_key", ""))
                self._printers[key].connectionStateChanged.connect(
                    self._onPrinterConnectionStateChanged)
                self._printers[key].connect()
            else:
                if self._printers[key].isConnected():
                    self._printers[key].close()

    ##  Because the model needs to be created in the same thread as the QMLEngine, we use a signal.
    def addPrinter(self, name, address, properties):
        printer = OctoPrintOutputDevice.OctoPrintOutputDevice(
            name, address, properties)
        self._printers[printer.getKey()] = printer
        global_container_stack = Application.getInstance(
        ).getGlobalContainerStack()
        if global_container_stack and printer.getKey(
        ) == global_container_stack.getMetaDataEntry("octoprint_id"):
            printer.setApiKey(
                global_container_stack.getMetaDataEntry(
                    "octoprint_api_key", ""))
            printer.connectionStateChanged.connect(
                self._onPrinterConnectionStateChanged)
            printer.connect()

    ##  Handler for when the connection state of one of the detected printers changes
    def _onPrinterConnectionStateChanged(self, key):
        if self._printers[key].isConnected():
            self.getOutputDeviceManager().addOutputDevice(self._printers[key])
        else:
            self.getOutputDeviceManager().removeOutputDevice(key)

    ##  Handler for zeroConf detection
    def _onServiceChanged(self, zeroconf, service_type, name, state_change):
        if state_change == ServiceStateChange.Added:
            info = zeroconf.get_service_info(service_type, name)
            if info:
                address = '.'.join(map(str, info.address))
                self.addPrinterSignal.emit(str(name), address, info.properties)

        elif state_change == ServiceStateChange.Removed:
            pass
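Outside of Cura, the discovery half of this plugin reduces to a plain ServiceBrowser watching _octoprint._tcp.local. with a callback. The sketch below stands in for the signal-based hand-off (print() replaces the addPrinterSignal.emit call) and assumes the same older zeroconf API in which info.address is the packed IPv4 address.

import socket
import time

from zeroconf import ServiceBrowser, ServiceStateChange, Zeroconf


def on_service_changed(zeroconf, service_type, name, state_change):
    # In the plugin this callback emits addPrinterSignal so the device model
    # is created on the Qt/QML thread instead of the zeroconf thread.
    if state_change is ServiceStateChange.Added:
        info = zeroconf.get_service_info(service_type, name)
        if info:
            print("Found OctoPrint instance %s at %s" % (name, socket.inet_ntoa(info.address)))
    elif state_change is ServiceStateChange.Removed:
        print("Lost OctoPrint instance %s" % name)


zc = Zeroconf()
browser = ServiceBrowser(zc, "_octoprint._tcp.local.", handlers=[on_service_changed])
try:
    time.sleep(10)  # Give the browser some time to collect announcements.
finally:
    zc.close()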
Ejemplo n.º 59
0
class MKSOutputDevicePlugin(QObject, OutputDevicePlugin):
    def __init__(self):
        super().__init__()
        self._zero_conf = None
        self._browser = None
        self._printers = {}
        self._discovered_devices = {}

        self._error_message = None

        self._old_printers = []
        # Assumed default: discovery results with these addresses are rejected;
        # _onServiceChanged refers to this attribute but it was never initialised.
        self._excluded_addresses = ["127.0.0.1"]

        self.addPrinterSignal.connect(self.addPrinter)
        self.removePrinterSignal.connect(self.removePrinter)

        self._preferences = Application.getInstance().getPreferences()
        self._preferences.addPreference("mkswifi/manual_instances", "")
        self._preferences.addPreference("local_file/last_used_type", "")
        self._preferences.addPreference("local_file/dialog_save_path", "")
        self._manual_instances = self._preferences.getValue(
            "mkswifi/manual_instances").split(",")
        Application.getInstance().globalContainerStackChanged.connect(
            self.reCheckConnections)

        self._service_changed_request_queue = Queue()
        self._service_changed_request_event = Event()
        self._service_changed_request_thread = Thread(
            target=self._handleOnServiceChangedRequests, daemon=True)
        self._service_changed_request_thread.start()

        self._changestage = False

    addPrinterSignal = Signal()
    removePrinterSignal = Signal()
    printerListChanged = Signal()

    def start(self):
        self.startDiscovery()

    def startDiscovery(self):
        self.stop()
        self.getOutputDeviceManager().addOutputDevice(
            SaveOutputDevice.SaveOutputDevice())
        if self._browser:
            self._browser.cancel()
            self._browser = None
            self._old_printers = list(self._printers)  # remember printers from before the re-scan
            self._printers = {}
            self.printerListChanged.emit()
        self._zero_conf = Zeroconf()
        self._browser = ServiceBrowser(self._zero_conf, u'_mks._tcp.local.',
                                       [self._appendServiceChangedRequest])
        for address in self._manual_instances:
            if address:
                self.addManualPrinter(address)

    def addManualPrinter(self, address):
        if address not in self._manual_instances:
            self._manual_instances.append(address)
            self._preferences.setValue("mkswifi/manual_instances",
                                       ",".join(self._manual_instances))

        instance_name = "manual:%s" % address
        properties = {
            b"name": address.encode("utf-8"),
            b"address": address.encode("utf-8"),
            b"manual": b"true",
            b"incomplete": b"false"
        }

        if instance_name not in self._printers:
            # Add a preliminary printer instance
            self.addPrinter(instance_name, address, properties)

        # self.checkManualPrinter(address)
        # self.checkClusterPrinter(address)

    def removeManualPrinter(self, key, address=None):
        if key in self._printers:
            if not address:
                address = self._printers[key].ipAddress
            self.removePrinter(key)

        if address in self._manual_instances:
            self._manual_instances.remove(address)
            self._preferences.setValue("mkswifi/manual_instances",
                                       ",".join(self._manual_instances))

    def stop(self):
        # self.getOutputDeviceManager().removeOutputDevice("save_with_screenshot")
        if self._zero_conf is not None:
            Logger.log("d", "zeroconf close...")
            self._zero_conf.close()

    def getPrinters(self):
        return self._printers

    def disConnections(self, key):
        Logger.log("d", "disConnections change %s" % key)
        # for keys in self._printers:
        #     if self._printers[key].isConnected():
        #         Logger.log("d", "Closing connection [%s]..." % key)
        if key in self._printers:
            self._printers[key].disconnect()
            # self._printers[key].connectionStateChanged.disconnect(self._onPrinterConnectionStateChanged)
            self.getOutputDeviceManager().removeOutputDevice(key)
        preferences = Application.getInstance().getPreferences()
        preferences.addPreference("mkswifi/stopupdate", "True")

    def reCheckConnections(self):
        active_machine = Application.getInstance().getGlobalContainerStack()
        if not active_machine:
            return
        Logger.log(
            "d", "GlobalContainerStack change %s" %
            active_machine.getMetaDataEntry("mks_network_key"))

        for key in self._printers:
            if key == active_machine.getMetaDataEntry("mks_network_key"):
                if not self._printers[key].isConnected():
                    Logger.log("d", "Connecting [%s]..." % key)
                    self._printers[key].connect()
                    self._printers[key].connectionStateChanged.connect(
                        self._onPrinterConnectionStateChanged)
            else:
                if self._printers[key].isConnected():
                    Logger.log("d", "Closing connection [%s]..." % key)
                    self._printers[key].disconnect()
                    self._printers[key].connectionStateChanged.disconnect(
                        self._onPrinterConnectionStateChanged)

    def addPrinter(self, name, address, properties):
        printer = MKSOutputDevice.MKSOutputDevice(name, address, properties)
        # self._api_prefix = "/"
        # printer = NetworkPrinterOutputDevice.NetworkPrinterOutputDevice(name, address, properties, self._api_prefix)
        self._printers[printer.getKey()] = printer
        global_container_stack = Application.getInstance(
        ).getGlobalContainerStack()
        if global_container_stack and printer.getKey(
        ) == global_container_stack.getMetaDataEntry("mks_network_key"):
            if printer.getKey(
            ) not in self._old_printers:  # Was the printer already connected, but a re-scan forced?
                Logger.log("d",
                           "addPrinter, connecting [%s]..." % printer.getKey())
                self._printers[printer.getKey()].connect()
                printer.connectionStateChanged.connect(
                    self._onPrinterConnectionStateChanged)
        self.printerListChanged.emit()

    def removePrinter(self, name):
        printer = self._printers.pop(name, None)
        if printer:
            if printer.isConnected():
                printer.disconnect()
                printer.connectionStateChanged.disconnect(
                    self._onPrinterConnectionStateChanged)
                Logger.log("d", "removePrinter, disconnecting [%s]..." % name)
        self.printerListChanged.emit()

    def printertrytoconnect(self):
        Logger.log("d", "mks printertrytoconnect")
        self._changestage = True

    def _onPrinterConnectionStateChanged(self, key):
        if key not in self._printers:
            return
        # Logger.log("d", "mks add output device %s" % self._printers[key].isConnected())
        if self._printers[key].isConnected():
            # Logger.log("d", "mks add output device--------ok--------- %s" % self._printers[key].isConnected())
            if self._error_message:
                self._error_message.hide()
            name = "Printer connect success"
            if CuraApplication.getInstance().getPreferences().getValue(
                    "general/language") == "zh_CN":
                name = "打印机连接成功"
            else:
                name = "Printer connect success"
            self._error_message = Message(name)
            self._error_message.show()
            self.getOutputDeviceManager().addOutputDevice(self._printers[key])
            # preferences = Application.getInstance().getPreferences()
            # if preferences.getValue("mkswifi/changestage"):
            #     preferences.addPreference("mkswifi/changestage", "False")
            #     CuraApplication.getInstance().getController().setActiveStage("MonitorStage")
        else:
            # self.getOutputDeviceManager().removeOutputDevice(key)
            global_container_stack = CuraApplication.getInstance(
            ).getGlobalContainerStack()
            if global_container_stack:
                meta_data = global_container_stack.getMetaData()
                if "mks_network_key" in meta_data:
                    localkey = global_container_stack.getMetaDataEntry(
                        "mks_network_key")
                    # global_container_stack.setMetaDataEntry("mks_network_key", key)
                    # global_container_stack.removeMetaDataEntry(
                    # "network_authentication_id")
                    # global_container_stack.removeMetaDataEntry(
                    # "network_authentication_key")
                    # Logger.log("d", "mks localkey--------ok--------- %s" % localkey)
                    # Logger.log("d", "mks key--------ok--------- %s" % key)
                    if localkey != key and key in self._printers:
                        # self.getOutputDeviceManager().connect()
                        self.getOutputDeviceManager().removeOutputDevice(key)
        # else:
        #     if self._error_message:
        #         self._error_message.hide()
        #     self._error_message = Message(i18n_catalog.i18nc("@info:status", "Printer connect failed"))
        #     self._error_message.show()
        # else:
        #     Logger.log("d", "mks add output device--------ok--------- %s" % self._printers[key].isConnected())
        #     self._printers[key].disconnect()
        # self._printers[key].connectionStateChanged.disconnect(self._onPrinterConnectionStateChanged)

    def _onServiceChanged(self, zeroconf, service_type, name, state_change):
        if state_change == ServiceStateChange.Added:
            Logger.log("d", "Bonjour service added: %s" % name)

            # First try getting info from zeroconf cache
            info = ServiceInfo(service_type, name, properties={})
            for record in zeroconf.cache.entries_with_name(name.lower()):
                info.update_record(zeroconf, time.time(), record)

            for record in zeroconf.cache.entries_with_name(info.server):
                info.update_record(zeroconf, time.time(), record)
                if info.address:
                    break

            # Request more data if info is not complete
            if not info.address:
                Logger.log("d", "Trying to get address of %s", name)
                info = zeroconf.get_service_info(service_type, name)

            if info:
                type_of_device = info.properties.get(b"type", None)
                if type_of_device:
                    if type_of_device == b"printer":
                        address = '.'.join(map(str, info.address))
                        if address in self._excluded_addresses:
                            Logger.log(
                                "d",
                                "The IP address %s of the printer '%s' is not correct. Trying to reconnect.",
                                address, name)
                            return False  # Got the localhost/excluded IP; reschedule so discovery retries
                        self.addPrinterSignal.emit(str(name), address,
                                                   info.properties)
                    else:
                        Logger.log(
                            "w",
                            "The type of the found device is '%s', not 'printer'! Ignoring..."
                            % type_of_device)
            else:
                Logger.log("w", "Could not get information about %s" % name)
                return False

        elif state_change == ServiceStateChange.Removed:
            Logger.log("d", "Bonjour service removed: %s" % name)
            self.removePrinterSignal.emit(str(name))

        return True

    def _appendServiceChangedRequest(self, zeroconf, service_type, name,
                                     state_change):
        # append the request and set the event so the event handling thread can pick it up
        item = (zeroconf, service_type, name, state_change)
        self._service_changed_request_queue.put(item)
        self._service_changed_request_event.set()

    def _handleOnServiceChangedRequests(self):
        while True:
            # wait for the event to be set
            self._service_changed_request_event.wait(timeout=5.0)
            # stop if the application is shutting down
            if Application.getInstance().isShuttingDown():
                return

            self._service_changed_request_event.clear()

            # handle all pending requests
            reschedule_requests = []  # requests that have failed and will be re-scheduled later
            while not self._service_changed_request_queue.empty():
                request = self._service_changed_request_queue.get()
                zeroconf, service_type, name, state_change = request
                try:
                    result = self._onServiceChanged(zeroconf, service_type,
                                                    name, state_change)
                    if not result:
                        reschedule_requests.append(request)
                except Exception:
                    Logger.logException(
                        "e",
                        "Failed to get service info for [%s] [%s], the request will be rescheduled",
                        service_type, name)
                    reschedule_requests.append(request)

            # re-schedule the failed requests if any
            if reschedule_requests:
                for request in reschedule_requests:
                    self._service_changed_request_queue.put(request)

    @pyqtSlot()
    def openControlPanel(self):
        Logger.log("d", "Opening print jobs web UI...")
        selected_device = self.getOutputDeviceManager().getActiveDevice()
        self._monitor_view_qml_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "MonitorItem4x.qml")
        self.__additional_components_view = Application.getInstance(
        ).createQmlComponent(self._monitor_view_qml_path, {"manager": self})
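The _appendServiceChangedRequest/_handleOnServiceChangedRequests pair above is a small producer/consumer arrangement: the zeroconf callback thread only enqueues a tuple and sets an Event, while a daemon worker drains the queue and re-queues anything that failed. A stripped-down sketch of the same pattern follows, with a stand-in handle_request() in place of _onServiceChanged and a plain flag in place of Application.getInstance().isShuttingDown().

import time
from queue import Queue
from threading import Event, Thread

request_queue = Queue()
request_event = Event()
shutting_down = False  # stand-in for Application.getInstance().isShuttingDown()


def handle_request(request):
    """Process one request; return False to have it re-scheduled."""
    print("handling", request)
    return True


def enqueue_request(*request):
    # Called from the zeroconf callback thread; cheap and non-blocking.
    request_queue.put(request)
    request_event.set()


def worker():
    while True:
        request_event.wait(timeout=5.0)  # wake periodically to check for shutdown
        if shutting_down:
            return
        request_event.clear()

        rescheduled = []
        while not request_queue.empty():
            request = request_queue.get()
            try:
                if not handle_request(request):
                    rescheduled.append(request)
            except Exception:
                rescheduled.append(request)

        # Put failed requests back so they are retried on the next wake-up.
        for request in rescheduled:
            request_queue.put(request)


Thread(target=worker, daemon=True).start()
enqueue_request("zeroconf", "_mks._tcp.local.", "some-printer._mks._tcp.local.", "Added")
time.sleep(1)  # let the daemon worker drain the queue before the script exits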
Ejemplo n.º 60
0
def test_known_answer_supression_service_type_enumeration_query():
    zc = Zeroconf(interfaces=['127.0.0.1'])
    type_ = "_knownservice._tcp.local."
    name = "knownname"
    registration_name = "%s.%s" % (name, type_)
    desc = {'path': '/~paulsm/'}
    server_name = "ash-2.local."
    info = ServiceInfo(type_,
                       registration_name,
                       80,
                       0,
                       0,
                       desc,
                       server_name,
                       addresses=[socket.inet_aton("10.0.1.2")])
    zc.register_service(info)

    type_2 = "_knownservice2._tcp.local."
    name = "knownname"
    registration_name2 = "%s.%s" % (name, type_2)
    desc = {'path': '/~paulsm/'}
    server_name2 = "ash-3.local."
    info = ServiceInfo(type_2,
                       registration_name2,
                       80,
                       0,
                       0,
                       desc,
                       server_name2,
                       addresses=[socket.inet_aton("10.0.1.2")])
    zc.register_service(info)
    now = current_time_millis()
    _clear_cache(zc)

    # Test PTR supression
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(const._SERVICE_TYPE_ENUMERATION_NAME,
                             const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    unicast_out, multicast_out = zc.query_handler.response(
        r.DNSIncoming(packets[0]), "1.2.3.4", const._MDNS_PORT)
    assert unicast_out is None
    assert multicast_out is not None and multicast_out.answers

    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(const._SERVICE_TYPE_ENUMERATION_NAME,
                             const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    generated.add_answer_at_time(
        r.DNSPointer(
            const._SERVICE_TYPE_ENUMERATION_NAME,
            const._TYPE_PTR,
            const._CLASS_IN,
            const._DNS_OTHER_TTL,
            type_,
        ),
        now,
    )
    generated.add_answer_at_time(
        r.DNSPointer(
            const._SERVICE_TYPE_ENUMERATION_NAME,
            const._TYPE_PTR,
            const._CLASS_IN,
            const._DNS_OTHER_TTL,
            type_2,
        ),
        now,
    )
    packets = generated.packets()
    unicast_out, multicast_out = zc.query_handler.response(
        r.DNSIncoming(packets[0]), "1.2.3.4", const._MDNS_PORT)
    assert unicast_out is None
    assert not multicast_out or not multicast_out.answers

    # unregister
    zc.unregister_service(info)
    zc.close()
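The test above checks RFC 6762 known-answer suppression for the _services._dns-sd._udp.local. meta-query: once the second query already lists both registered service types as PTR answers, the responder has nothing left to add and the multicast response stays empty. Outside the test harness, the same enumeration is exposed through the public ZeroconfServiceTypes helper; the sketch below registers one service on a loopback-only instance and enumerates service types from a second instance (interface choice, timeout, and the service data are illustrative).

import socket

from zeroconf import ServiceInfo, Zeroconf, ZeroconfServiceTypes

responder = Zeroconf(interfaces=['127.0.0.1'])
info = ServiceInfo(
    "_knownservice._tcp.local.",
    "knownname._knownservice._tcp.local.",
    80, 0, 0,
    {'path': '/~paulsm/'},
    "ash-2.local.",
    addresses=[socket.inet_aton("10.0.1.2")],
)
responder.register_service(info)

try:
    # Sends the _services._dns-sd._udp.local. PTR query and collects the answers;
    # a querier that already lists a type in its known-answer section would not
    # receive that type again.
    browser_zc = Zeroconf(interfaces=['127.0.0.1'])
    print(ZeroconfServiceTypes.find(zc=browser_zc, timeout=2))
    browser_zc.close()
finally:
    responder.unregister_service(info)
    responder.close()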