Example 1
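Most of the snippets on this page call the u() helper from pyzmq's zmq.utils.strtypes, which coerces ZeroMQ message frames (bytes) to unicode text; b() is its mirror image. As a rough orientation sketch of its Python 3 behaviour (the library aliases it from cast_unicode), not the verbatim library source:

# Simplified sketch of zmq.utils.strtypes.u (alias of cast_unicode).
def u(s, encoding='utf8', errors='strict'):
    """Coerce bytes to unicode text, passing str through unchanged."""
    if isinstance(s, bytes):
        return s.decode(encoding, errors)
    if isinstance(s, str):
        return s
    raise TypeError("Expected unicode or bytes, got %r" % s)
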
    def controler_socket(self):
        msg = self.controler.recv_multipart()
        try:
            master_id = u(msg[0])
            action = u(msg[1])
            ping_interval = int(msg[2])

            if master_id != "master":
                self.logger.error("Invalid master id '%s'. Should be 'master'",
                                  master_id)
                return
            if action != "PONG":
                self.logger.error("Invalid answer '%s'. Should be 'PONG'",
                                  action)
                return
        except (IndexError, ValueError):
            self.logger.error("Invalid message '%s'", msg)
            return

        if ping_interval < TIMEOUT:
            self.logger.error("invalid ping interval (%d) too small", ping_interval)
            return

        self.logger.debug("master => PONG(%d)", ping_interval)
        self.ping_interval = ping_interval
Example 2
def write_key(pub_key):
    banner = u("""#   ****  Generated on {0} by tsg  ****
#   ZeroMQ CURVE Public Certificate
#   Exchange securely, or use a secure mechanism to verify the contents
#   of this file after exchange. Store public certificates in your home
#   directory, in the .curve subdirectory.
""")

    base_dir = os.path.dirname(__file__)
    public_keys_dir = os.path.join(base_dir, 'public_keys')
    key = pub_key.decode('utf-8')
    with io.open(
        public_keys_dir+"/"+str(hashlib.sha1(pub_key).hexdigest())
            + ".key", 'w', encoding='utf8') as f:

        f.write(banner.format(datetime.datetime.now()))
        f.write(u('metadata\n'))
        f.write(u('curve\n'))
        f.write(u("    public-key = \"{0}\"\n").format(pub_key))

    #If it's the first client we start the secure server
    if not p.is_alive():
        p.start()
    #stop secure_srv process and restart it to be able to auth new clients
    q_mgmt.put(ThMgmt(0))
    print "reauth sended"
Example 3
    def controler_socket(self):
        try:
            # We need here to use the zmq.NOBLOCK flag, otherwise we could block
            # the whole main loop where this function is called.
            msg = self.controler.recv_multipart(zmq.NOBLOCK)
        except zmq.error.Again:
            return False
        # This is way too verbose for production and should only be activated
        # by (and for) developers
        # self.logger.debug("[CC] Receiving: %s", msg)

        # 1: the hostname (see ZMQ documentation)
        hostname = u(msg[0])
        # 2: the action
        action = u(msg[1])

        # Check that lava-logs only sends PINGs
        if hostname == "lava-logs" and action != "PING":
            self.logger.error("%s => %s Invalid action from log daemon",
                              hostname, action)
            return True

        # Handle the actions
        if action == 'HELLO' or action == 'HELLO_RETRY':
            self._handle_hello(hostname, action, msg)
        elif action == 'PING':
            self._handle_ping(hostname, action, msg)
        elif action == 'END':
            self._handle_end(hostname, action, msg)
        elif action == 'START_OK':
            self._handle_start_ok(hostname, action, msg)
        else:
            self.logger.error("<%s> sent unknown action=%s, args=(%s)",
                              hostname, action, msg[1:])
        return True
Example 4
 def test_unicode(self):
     """Test the unicode representations of the Frames."""
     s = u('asdf')
     self.assertRaises(TypeError, zmq.Frame, s)
     for i in range(16):
         s = (2**i)*u('§')
         m = zmq.Frame(s.encode('utf8'))
         self.assertEqual(s, unicode(m.bytes,'utf8'))
Example 5
    def _handle_pipe(self):
        """
        Handle a message from front-end API.
        """
        terminate = False

        # Get the whole message off the pipe in one go
        try:
            # Try/except needed for Windows "support"
            msg = self.pipe.recv_multipart()

            if msg is None:
                terminate = True
                return terminate
        except:
            terminate = True
            return terminate

        command = msg[0]
        self.log.debug("auth received API command %r", command)

        if command == b'ALLOW':
            addresses = [u(m, self.encoding) for m in msg[1:]]
            try:
                self.authenticator.allow(*addresses)
            except Exception as e:
                self.log.exception("Failed to allow %s", addresses)
            self.allow = True

        elif command == b'DENY':
            addresses = [u(m, self.encoding) for m in msg[1:]]
            try:
                self.authenticator.deny(*addresses)
            except Exception as e:
                self.log.exception("Failed to deny %s", addresses)

        elif command == b'PLAIN':
            domain = u(msg[1], self.encoding)
            json_passwords = msg[2]
            self.authenticator.configure_plain(domain, jsonapi.loads(json_passwords))

        elif command == b'CURVE':
            # For now we don't do anything with domains
            domain = u(msg[1], self.encoding)

            # If location is CURVE_ALLOW_ANY, allow all clients. Otherwise
            # treat location as a directory that holds the certificates.
            location = u(msg[2], self.encoding)
            self.authenticator.configure_curve(domain, location)
            self.curve = True

        elif command == b'TERMINATE':
            terminate = True

        else:
            self.log.error("Invalid auth command from API: %r", command)

        return terminate
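For reference, the front-end API drives this handler by writing one multipart message per command to the other end of the pipe; frame 0 selects the branch above. A hedged sketch of that wire format (addresses, domain, and passwords are illustrative), assuming pipe is the front-end half of the PAIR socket and jsonapi is zmq.utils.jsonapi:

pipe.send_multipart([b'ALLOW', b'127.0.0.1', b'192.168.0.10'])
pipe.send_multipart([b'DENY', b'10.0.0.1'])
pipe.send_multipart([b'PLAIN', b'global', jsonapi.dumps({'admin': 'secret'})])
pipe.send_multipart([b'CURVE', b'*', b'/path/to/public_keys'])
pipe.send_multipart([b'TERMINATE'])
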
Example 6
 def test_unicode_message(self):
     logger, handler, sub = self.connect_handler()
     base_topic = b(self.topic + '.INFO')
     for msg, expected in [
         (u('hello'), [base_topic, b('hello\n')]),
         (u('héllo'), [base_topic, b('héllo\n')]),
         (u('tøpic::héllo'), [base_topic + b('.tøpic'), b('héllo\n')]),
     ]:
         logger.info(msg)
         received = sub.recv_multipart()
         self.assertEqual(received, expected)
Example 7
    def messageReceived(self, msg):

        command = msg[0]

        if command == b'ALLOW':
            addresses = [u(m, self.encoding) for m in msg[1:]]
            try:
                self.authenticator.allow(*addresses)
            except Exception as e:
                log.err("Failed to allow %s", addresses)

        elif command == b'CURVE':
            domain = u(msg[1], self.encoding)
            location = u(msg[2], self.encoding)
            self.authenticator.configure_curve(domain, location)
Example 8
File: lava.py Project: Linaro/squad
    def listen(self):
        listener_url = self.get_listener_url()

        self.log_debug("connecting to %s" % listener_url)

        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.SUB)
        self.socket.setsockopt_string(zmq.SUBSCRIBE, "")
        try:
            # requires PyZMQ to be built against ZeroMQ 4.2+
            self.socket.setsockopt(zmq.HEARTBEAT_IVL, 1000)  # 1 s
            self.socket.setsockopt(zmq.HEARTBEAT_TIMEOUT, 10000)  # 10 s
        except AttributeError:
            self.log_warn('PyZMQ has no support for heartbeat (requires ZeroMQ library 4.2+), connection may be unstable')
            pass

        self.socket.connect(listener_url)

        self.log_debug("connected to %s" % listener_url)

        while True:
            try:
                message = self.socket.recv_multipart()
                (topic, uuid, dt, username, data) = (u(m) for m in message[:])
                data = json.loads(data)
                self.receive_event(topic, data)
            except Exception as e:
                self.log_error(str(e) + "\n" + traceback.format_exc())
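The loop above expects five-frame events: topic, uuid, datetime, username, and JSON data. A minimal sketch of a publisher emitting one such event for local testing; the endpoint and payload are made up for illustration:

import datetime
import json
import uuid

import zmq

context = zmq.Context()
pub = context.socket(zmq.PUB)
pub.bind("tcp://127.0.0.1:5500")  # hypothetical listener_url

# Five frames, mirroring what listen() unpacks and decodes with u().
pub.send_multipart([
    b"org.example.testjob",
    str(uuid.uuid4()).encode(),
    datetime.datetime.now().isoformat().encode(),
    b"lavaserver",
    json.dumps({"state": "Finished", "job": 1234}).encode(),
])
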
Example 9
    def iter_messages(self):
        """ Yields tuples of (watcher, subtopic, stat)"""
        recv = self.pubsub_socket.recv_multipart
        with self:
            while True:
                try:
                    events = dict(self.poller.poll(self.timeout * 1000))
                except zmq.ZMQError as e:
                    if e.errno == errno.EINTR:
                        continue
                    raise

                if len(events) == 0:
                    continue

                try:
                    topic, stat = recv()
                except zmq.core.error.ZMQError as e:
                    if e.errno != errno.EINTR:
                        raise
                    else:
                        try:
                            sys.exc_clear()
                        except Exception:
                            pass
                        continue

                topic = u(topic).split(".")
                if len(topic) == 3:
                    __, watcher, subtopic = topic
                    yield watcher, subtopic, json.loads(stat)
                elif len(topic) == 2:
                    __, watcher = topic
                    yield watcher, None, json.loads(stat)
Example 10
    def handle_recv(self, data):
        """called each time circusd sends an event"""
        # maintains a periodic callback to compute mem and cpu consumption for
        # each pid.
        logger.debug('Received an event from circusd: %s' % str(data))
        topic, msg = data
        try:
            topic = u(topic)
            watcher = topic.split('.')[1:-1][0]
            action = topic.split('.')[-1]
            msg = json.loads(msg)

            if action in ('reap', 'kill'):
                # a process was reaped
                pid = msg['process_pid']
                self.remove_pid(watcher, pid)
            elif action == 'spawn':
                # a process was added
                pid = msg['process_pid']
                self._append_pid(watcher, pid)
            elif action == 'stop':
                # the whole watcher was stopped.
                self.stop_watcher(watcher)
            else:
                logger.debug('Unknown action: %r' % action)
                logger.debug(msg)
        except Exception:
            logger.exception('Failed to handle %r' % msg)
Example 11
    def run(self):
        self.setup()

        max_db_commit_retry = 3
        while True:
            msg = self.sub.recv_multipart()
            try:
                (topic, uuid, dt, username, data) = (u(m) for m in msg)
                dt = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S.%f")
            except (IndexError, ValueError):
                LOG.error("Invalid message: %s", msg)
                continue

            # Save into the database
            try:
                session = self.sessions()
                message = Message(topic=topic, uuid=uuid, datetime=dt, username=username, data=data)
                session.add(message)
            except SQLAlchemyError as err:
                LOG.error("Unable to build the new message row: %s", err)
                continue

            # Retry the database commit
            for retry in range(1, max_db_commit_retry + 1):
                try:
                    session.commit()
                except SQLAlchemyError as err:
                    if retry == max_db_commit_retry:
                        LOG.error("Unable to commit to the database, dropping the message")
                        LOG.error("Database error: %s", err)
Example 12
 def handle_recv(self, data):
     topic, msg = data
     topic_parts = u(topic).split(".")
     watcher = topic_parts[1]
     action = topic_parts[2]
     with open(self.config['file'], 'a+') as f:
         f.write('%s:%s' % (watcher, action))
Example 13
    def _handle_pipe(self):
        '''
        Handle a message from front-end API.
        '''
        terminate = False

        # Get the whole message off the pipe in one go
        msg = self.pipe.recv_multipart()

        if msg is None:
            terminate = True
            return terminate

        command = msg[0]
        logging.debug("auth received API command {0}".format(command))

        if command == b'ALLOW':
            address = u(msg[1], self.encoding)
            self.authenticator.allow(address)

        elif command == b'DENY':
            address = u(msg[1], self.encoding)
            self.authenticator.deny(address)

        elif command == b'PLAIN':
            domain = u(msg[1], self.encoding)
            json_passwords = msg[2]
            self.authenticator.configure_plain(domain, jsonapi.loads(json_passwords))

        elif command == b'CURVE':
            # For now we don't do anything with domains
            domain = u(msg[1], self.encoding)

            # If location is CURVE_ALLOW_ANY, allow all clients. Otherwise
            # treat location as a directory that holds the certificates.
            location = u(msg[2], self.encoding)
            self.authenticator.configure_curve(domain, location)

        elif command == b'TERMINATE':
            terminate = True

        else:
            logging.error("Invalid auth command from API: {0}".format(command))

        return terminate
Example 14
def read_from_stream(stream, timeout=5):
    start = time.time()
    while time.time() - start < timeout:
        try:
            data = stream.get_nowait()
            raise tornado.gen.Return(u(data['data']))
        except Empty:
            yield tornado_sleep(0.1)
    raise TimeoutException('Timeout reading queue')
Example 15
 def test():
     a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
     f = b.recv_string()
     assert not f.done()
     msg = u('πøøπ')
     yield a.send_string(msg)
     recvd = yield f
     assert f.done()
     self.assertEqual(f.result(), msg)
     self.assertEqual(recvd, msg)
Example 16
def run_ctl(args, queue=None, stdin=''):
    cmd = '%s -m circus.circusctl' % sys.executable
    proc = subprocess.Popen(cmd.split() + shlex.split(args),
                            stdin=subprocess.PIPE if stdin else None,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate(b(stdin) if stdin else None)
    stdout = u(stdout)
    stderr = u(stderr)
    if queue:
        queue.put(stderr)
        queue.put(stdout)
    try:
        import gevent
        if hasattr(gevent, 'shutdown'):
            gevent.shutdown()
    except ImportError:
        pass
    return stdout, stderr
Example 17
    def handle_recv(self, data):
        topic, msg = data
        topic_parts = u(topic).split(".")
        if topic_parts[2] == "reap":
            timeline = self.timelines.get(topic_parts[1], [])
            timeline.append(time.time())
            self.timelines[topic_parts[1]] = timeline

            self.check(topic_parts[1])
        elif topic_parts[2] == "updated":
            self.update_conf(topic_parts[1])
Example 18
def _write_key_file(key_filename, banner, public_key, secret_key=None, metadata=None, encoding='utf-8'):
    """ Create a certificate file """
    if isinstance(public_key, bytes):
        public_key = public_key.decode(encoding)
    if isinstance(secret_key, bytes):
        secret_key = secret_key.decode(encoding)
    with io.open(key_filename, 'w', encoding='utf8') as f:
        f.write(banner.format(datetime.datetime.now()))

        f.write(u('metadata\n'))
        if metadata:
            for k, v in metadata.items():
                if isinstance(v, bytes):
                    v = v.decode(encoding)
                f.write(u("    {0} = {1}\n").format(k, v))

        f.write(u('curve\n'))
        f.write(u("    public-key = \"{0}\"\n").format(public_key))

        if secret_key:
            f.write(u("    secret-key = \"{0}\"\n").format(secret_key))
Example 19
    def run(self):
        self.setup()

        while True:
            msg = self.sock.recv_multipart()
            # TODO: use a pipeline to transform the messages
            try:
                (topic, uuid, dt, username, data) = msg[:]
            except IndexError:
                self.LOG.error("Droping invalid message")
                self.LOG.debug("=> %s", msg)
                continue
            self.LOG.debug("topic: %s, data: %s", u(topic), data)
            self.push.send_multipart(msg)
Example 20
    def __call__(self, data):
        if self._should_rollover(data["data"]):
            self._do_rollover()

        # If we want to prefix the stream with the current datetime
        for line in u(data["data"]).split("\n"):
            if not line:
                continue
            if self.time_format is not None:
                self._file.write(
                    "{time} [{pid}] | ".format(time=self.now().strftime(self.time_format), pid=data["pid"])
                )
            self._file.write(line)
            self._file.write("\n")
        self._file.flush()
Example 21
    def test_curve_user_id(self):
        """threaded auth - CURVE"""
        self.auth.allow('127.0.0.1')
        certs = self.load_certs(self.secret_keys_dir)
        server_public, server_secret, client_public, client_secret = certs

        self.auth.configure_curve(domain='*', location=self.public_keys_dir)
        server = self.socket(zmq.PULL)
        server.curve_publickey = server_public
        server.curve_secretkey = server_secret
        server.curve_server = True
        client = self.socket(zmq.PUSH)
        client.curve_publickey = client_public
        client.curve_secretkey = client_secret
        client.curve_serverkey = server_public
        assert self.can_connect(client, server)
        
        # test default user-id map
        client.send(b'test')
        msg = self.recv(server, copy=False)
        assert msg.bytes == b'test'
        try:
            user_id = msg.get('User-Id')
        except zmq.ZMQVersionError:
            pass
        else:
            assert user_id == u(client_public)

        # test custom user-id map
        self.auth.curve_user_id = lambda client_key: u'custom'

        client2 = self.socket(zmq.PUSH)
        client2.curve_publickey = client_public
        client2.curve_secretkey = client_secret
        client2.curve_serverkey = server_public
        assert self.can_connect(client2, server)

        client2.send(b'test2')
        msg = self.recv(server, copy=False)
        assert msg.bytes == b'test2'
        try:
            user_id = msg.get('User-Id')
        except zmq.ZMQVersionError:
            pass
        else:
            assert user_id == u'custom'
Example 22
    def read_event_socket(self):
        try:
            msg = self.event_socket.recv_multipart(zmq.NOBLOCK)
        except zmq.error.Again:
            return False

        try:
            (topic, _, dt, username, data) = (u(m) for m in msg)
        except ValueError:
            self.logger.error("Invalid event: %s", msg)
            return True

        if topic.endswith(".testjob"):
            try:
                data = simplejson.loads(data)
                if data["state"] == "Canceling":
                    self.events["canceling"].add(int(data["job"]))
            except ValueError:
                self.logger.error("Invalid event data: %s", msg)
        return True
Example 23
def read_from_stream(stream, desired_channel, timeout=5):
    start = time.time()
    accumulator = ''
    while not channels[desired_channel] and time.time() - start < timeout:
        try:
            data = stream.get_nowait()
            data = u(data['data']).split('\n')
            accumulator += data.pop(0)
            if data:
                data.insert(0, accumulator)
                accumulator = data.pop()
                for line in data:
                    if len(line) > 1 and line[1] == ':':
                        channel, string = line.partition(':')[::2]
                        channels[int(channel)].append(string)
        except Empty:
            yield tornado_sleep(0.1)
    if channels[desired_channel]:
        raise tornado.gen.Return(channels[desired_channel].pop(0))
    raise TimeoutException('Timeout reading queue')
Example 24
 def test_copy_path(self):
     watcher = SomeWatcher()
     yield watcher.run()
     # wait for watcher data at most 5s
     messages = []
     resp = False
     start_time = time.time()
     while (time.time() - start_time) <= 5:
         yield tornado_sleep(0.5)
         # More than one Queue.get call is needed to get full
         # output from a watcher in an environment with rich sys.path.
         try:
             m = watcher.stream.get(block=False)
             messages.append(m)
         except Queue.Empty:
             pass
         data = ''.join(u(m['data']) for m in messages)
         if 'XYZ' in data:
             resp = True
             break
     self.assertTrue(resp)
     yield watcher.stop()
Example 25
    def run(self):
        self.setup()
        while True:
            msg = self.sub.recv_multipart()
            try:
                (topic, uuid, datetime, username, data) = (u(m) for m in msg)
            except (IndexError, ValueError):
                LOG.error("Invalid message: %s", msg)
                continue

            variables = {"topic": topic,
                         "uuid": uuid,
                         "datetime": datetime,
                         "username": username}
            for (i, m) in enumerate(self.matchers):
                if m.match(variables, data):
                    LOG.debug("%s matching %s", msg, m.name)
                    self.ctrl.send_multipart([b(str(i)),
                                              b(topic),
                                              b(uuid),
                                              b(datetime),
                                              b(username),
                                              b(data)])
Example 26
    def handle_recv(self, data):
        """Handle received message from circusd

        We need to handle two messages:
        - spawn: add a new monitored child pid
        - reap: remove a killed child pid from monitoring
        """
        topic, msg = data
        topic_parts = u(topic).split(".")
        logger.debug("received data from circusd: %s, %s", topic_parts, msg)
        # check if monitored watchers:
        if (topic_parts[0] == 'watcher' and
                self._match_watcher_name(topic_parts[1])):
            try:
                message = json.loads(msg)
            except ValueError:
                logger.error("Error while decoding json for message: %s",
                             msg)
            else:
                if "process_pid" not in message:
                    logger.warning('no process_pid in message')
                    return
                pid = str(message.get("process_pid"))
                if topic_parts[2] == "spawn":
                    self.pid_status[pid] = dict(watcher=topic_parts[1],
                                                last_activity=time.time())
                    logger.info("added new monitored pid for %s:%s",
                                topic_parts[1],
                                pid)
                # very questionable fix for Py3 here!
                # had to add check for pid in self.pid_status
                elif topic_parts[2] == "reap" and pid in self.pid_status:
                    old_pid = self.pid_status.pop(pid)
                    logger.info("removed monitored pid for %s:%s",
                                old_pid['watcher'],
                                pid)
Example 27
    def handle(self, *args, **options):
        # Initialize logging.
        self.setup_logging("lava-logs", options["level"],
                           options["log_file"], FORMAT)

        self.logger.info("[INIT] Dropping privileges")
        if not self.drop_privileges(options['user'], options['group']):
            self.logger.error("[INIT] Unable to drop privileges")
            return

        # Create the sockets
        context = zmq.Context()
        self.log_socket = context.socket(zmq.PULL)
        self.controler = context.socket(zmq.ROUTER)
        self.controler.setsockopt(zmq.IDENTITY, b"lava-logs")
        # Limit the number of messages in the queue
        self.controler.setsockopt(zmq.SNDHWM, 2)
        # From http://api.zeromq.org/4-2:zmq-setsockopt#toc5
        # "Immediately readies that connection for data transfer with the master"
        self.controler.setsockopt(zmq.CONNECT_RID, b"master")

        if options['ipv6']:
            self.logger.info("[INIT] Enabling IPv6")
            self.log_socket.setsockopt(zmq.IPV6, 1)
            self.controler.setsockopt(zmq.IPV6, 1)

        if options['encrypt']:
            self.logger.info("[INIT] Starting encryption")
            try:
                self.auth = ThreadAuthenticator(context)
                self.auth.start()
                self.logger.debug("[INIT] Opening master certificate: %s", options['master_cert'])
                master_public, master_secret = zmq.auth.load_certificate(options['master_cert'])
                self.logger.debug("[INIT] Using slaves certificates from: %s", options['slaves_certs'])
                self.auth.configure_curve(domain='*', location=options['slaves_certs'])
            except IOError as err:
                self.logger.error("[INIT] %s", err)
                self.auth.stop()
                return
            self.log_socket.curve_publickey = master_public
            self.log_socket.curve_secretkey = master_secret
            self.log_socket.curve_server = True
            self.controler.curve_publickey = master_public
            self.controler.curve_secretkey = master_secret
            self.controler.curve_serverkey = master_public

        self.logger.debug("[INIT] Watching %s", options["slaves_certs"])
        self.cert_dir_path = options["slaves_certs"]
        self.inotify_fd = watch_directory(options["slaves_certs"])
        if self.inotify_fd is None:
            self.logger.error("[INIT] Unable to start inotify")

        self.log_socket.bind(options['socket'])
        self.controler.connect(options['master_socket'])

        # Poll on the sockets. This allows having a
        # nice timeout along with polling.
        self.poller = zmq.Poller()
        self.poller.register(self.log_socket, zmq.POLLIN)
        self.poller.register(self.controler, zmq.POLLIN)
        if self.inotify_fd is not None:
            self.poller.register(os.fdopen(self.inotify_fd), zmq.POLLIN)

        # Translate signals into zmq messages
        (self.pipe_r, _) = self.setup_zmq_signal_handler()
        self.poller.register(self.pipe_r, zmq.POLLIN)

        self.logger.info("[INIT] listening for logs")
        # PING right now: the master is waiting for this message to start
        # scheduling.
        self.controler.send_multipart([b"master", b"PING"])

        try:
            self.main_loop()
        except BaseException as exc:
            self.logger.error("[EXIT] Unknown exception raised, leaving!")
            self.logger.exception(exc)

        # Close the controler socket
        self.controler.close(linger=0)
        self.poller.unregister(self.controler)

        # Carefully close the logging socket as we don't want to lose messages
        self.logger.info("[EXIT] Disconnect logging socket and process messages")
        endpoint = u(self.log_socket.getsockopt(zmq.LAST_ENDPOINT))
        self.logger.debug("[EXIT] unbinding from '%s'", endpoint)
        self.log_socket.unbind(endpoint)

        # Empty the queue
        try:
            while self.wait_for_messages(True):
                # Flush test cases cache for every iteration because we might
                # get killed soon.
                self.flush_test_cases()
        except BaseException as exc:
            self.logger.error("[EXIT] Unknown exception raised, leaving!")
            self.logger.exception(exc)
        finally:
            # Last flush
            self.flush_test_cases()
            self.logger.info("[EXIT] Closing the logging socket: the queue is empty")
            self.log_socket.close()
            if options['encrypt']:
                self.auth.stop()
            context.term()
Example 28
    def logging_socket(self):
        msg = self.log_socket.recv_multipart()
        try:
            (job_id, message) = (u(m) for m in msg)  # pylint: disable=unbalanced-tuple-unpacking
        except ValueError:
            # do not let a bad message stop the master.
            self.logger.error("[POLL] failed to parse log message, skipping: %s", msg)
            return

        try:
            scanned = yaml.load(message, Loader=yaml.CLoader)
        except yaml.YAMLError:
            self.logger.error("[%s] data are not valid YAML, dropping", job_id)
            return

        # Look for "results" level
        try:
            message_lvl = scanned["lvl"]
            message_msg = scanned["msg"]
        except TypeError:
            self.logger.error("[%s] not a dictionary, dropping", job_id)
            return
        except KeyError:
            self.logger.error(
                "[%s] invalid log line, missing \"lvl\" or \"msg\" keys: %s",
                job_id, message)
            return

        # Find the handler (if available)
        if job_id not in self.jobs:
            # Query the database for the job
            try:
                job = TestJob.objects.get(id=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] unknown job id", job_id)
                return

            self.logger.info("[%s] receiving logs from a new job", job_id)
            # Create the sub directories (if needed)
            mkdir(job.output_dir)
            self.jobs[job_id] = JobHandler(job)

        if message_lvl == "results":
            try:
                job = TestJob.objects.get(pk=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] unknown job id", job_id)
                return
            meta_filename = create_metadata_store(message_msg, job)
            new_test_case = map_scanned_results(results=message_msg, job=job,
                                                meta_filename=meta_filename)
            if new_test_case is None:
                self.logger.warning(
                    "[%s] unable to map scanned results: %s",
                    job_id, message)
            else:
                self.test_cases.append(new_test_case)

            # Look for lava.job result
            if message_msg.get("definition") == "lava" and message_msg.get("case") == "job":
                # Flush cached test cases
                self.flush_test_cases()

                if message_msg.get("result") == "pass":
                    health = TestJob.HEALTH_COMPLETE
                    health_msg = "Complete"
                else:
                    health = TestJob.HEALTH_INCOMPLETE
                    health_msg = "Incomplete"
                self.logger.info("[%s] job status: %s", job_id, health_msg)

                infrastructure_error = (message_msg.get("error_type") in ["Bug",
                                                                          "Configuration",
                                                                          "Infrastructure"])
                if infrastructure_error:
                    self.logger.info("[%s] Infrastructure error", job_id)

                # Update status.
                with transaction.atomic():
                    # TODO: find a way to lock actual_device
                    job = TestJob.objects.select_for_update() \
                                         .get(id=job_id)
                    job.go_state_finished(health, infrastructure_error)
                    job.save()

        # Mark the file handler as used
        self.jobs[job_id].last_usage = time.time()

        # n.b. logging here would produce a log entry for every message in every job.
        # The format is a list of dictionaries
        message = "- %s" % message

        # Write data
        self.jobs[job_id].write(message)
Example 29
 def split_data(data):
     topic, msg = data
     topic_parts = u(topic).split(".")
     return topic_parts[1], topic_parts[2], msg
Example 30
    def _handle_end(self, hostname, msg):
        try:
            job_id = int(msg[2])
            error_msg = u(msg[3])
            compressed_description = msg[4]
        except UnicodeDecodeError:
            self.logger.error("Invalid END message: can't be decoded")
            return
        except (IndexError, ValueError):
            self.logger.error("Invalid END message from <%s> '%s'", hostname,
                              msg)
            return

        try:
            job = TestJob.objects.get(id=job_id)
        except TestJob.DoesNotExist:
            self.logger.error("[%d] Unknown job", job_id)
            # ACK even if the job is unknown to let the dispatcher
            # forget about it
            send_multipart_u(self.controler, [hostname, "END_OK", str(job_id)])
            return

        filename = os.path.join(job.output_dir, "description.yaml")
        # If description.yaml already exists: an END was already received
        if os.path.exists(filename):
            self.logger.info("[%d] %s => END (duplicated), skipping", job_id,
                             hostname)
        else:
            if compressed_description:
                self.logger.info("[%d] %s => END", job_id, hostname)
            else:
                self.logger.info(
                    "[%d] %s => END (lava-run crashed, mark job as INCOMPLETE)",
                    job_id,
                    hostname,
                )
                with transaction.atomic():
                    # TODO: find a way to lock actual_device
                    job = TestJob.objects.select_for_update().get(id=job_id)

                    job.go_state_finished(TestJob.HEALTH_INCOMPLETE)
                    if error_msg:
                        self.logger.error("[%d] Error: %s", job_id, error_msg)
                        job.failure_comment = error_msg
                    job.save()

            # Create description.yaml even if it's empty
            # Allows to know when END messages are duplicated
            try:
                # Create the directory if it was not already created
                mkdir(os.path.dirname(filename))
                # TODO: check that compressed_description is not ""
                description = lzma.decompress(compressed_description)
                with open(filename, "w") as f_description:
                    f_description.write(description.decode("utf-8"))
                if description:
                    parse_job_description(job)
            except (OSError, lzma.LZMAError) as exc:
                self.logger.error("[%d] Unable to dump 'description.yaml'",
                                  job_id)
                self.logger.exception("[%d] %s", job_id, exc)

        # ACK the job and mark the dispatcher as alive
        send_multipart_u(self.controler, [hostname, "END_OK", str(job_id)])
        self.dispatcher_alive(hostname)
Example 31
    def handle(self, *args, **options):
        # Initialize logging.
        self.setup_logging("lava-logs", options["level"], options["log_file"],
                           FORMAT)

        self.logger.info("[INIT] Starting lava-logs")
        self.logger.info("[INIT] Version %s", __version__)

        self.logger.info("[INIT] Dropping privileges")
        if not self.drop_privileges(options["user"], options["group"]):
            self.logger.error("[INIT] Unable to drop privileges")
            return

        filename = os.path.join(settings.MEDIA_ROOT, "lava-logs-config.yaml")
        self.logger.debug("[INIT] Dumping config to %s", filename)
        with open(filename, "w") as output:
            yaml_dump(options, output)

        # Create the sockets
        context = zmq.Context()
        self.log_socket = context.socket(zmq.PULL)
        self.controler = context.socket(zmq.ROUTER)
        self.controler.setsockopt(zmq.IDENTITY, b"lava-logs")
        # Limit the number of messages in the queue
        self.controler.setsockopt(zmq.SNDHWM, 2)
        # From http://api.zeromq.org/4-2:zmq-setsockopt#toc5
        # "Immediately readies that connection for data transfer with the master"
        self.controler.setsockopt(zmq.CONNECT_RID, b"master")

        if options["ipv6"]:
            self.logger.info("[INIT] Enabling IPv6")
            self.log_socket.setsockopt(zmq.IPV6, 1)
            self.controler.setsockopt(zmq.IPV6, 1)

        if options["encrypt"]:
            self.logger.info("[INIT] Starting encryption")
            try:
                self.auth = ThreadAuthenticator(context)
                self.auth.start()
                self.logger.debug("[INIT] Opening master certificate: %s",
                                  options["master_cert"])
                master_public, master_secret = zmq.auth.load_certificate(
                    options["master_cert"])
                self.logger.debug("[INIT] Using slaves certificates from: %s",
                                  options["slaves_certs"])
                self.auth.configure_curve(domain="*",
                                          location=options["slaves_certs"])
            except OSError as err:
                self.logger.error("[INIT] %s", err)
                self.auth.stop()
                return
            self.log_socket.curve_publickey = master_public
            self.log_socket.curve_secretkey = master_secret
            self.log_socket.curve_server = True
            self.controler.curve_publickey = master_public
            self.controler.curve_secretkey = master_secret
            self.controler.curve_serverkey = master_public

        self.logger.debug("[INIT] Watching %s", options["slaves_certs"])
        self.cert_dir_path = options["slaves_certs"]
        self.inotify_fd = watch_directory(options["slaves_certs"])
        if self.inotify_fd is None:
            self.logger.error("[INIT] Unable to start inotify")

        self.log_socket.bind(options["socket"])
        self.controler.connect(options["master_socket"])

        # Poll on the sockets. This allows having a
        # nice timeout along with polling.
        self.poller = zmq.Poller()
        self.poller.register(self.log_socket, zmq.POLLIN)
        self.poller.register(self.controler, zmq.POLLIN)
        if self.inotify_fd is not None:
            self.poller.register(os.fdopen(self.inotify_fd), zmq.POLLIN)

        # Translate signals into zmq messages
        (self.pipe_r, _) = self.setup_zmq_signal_handler()
        self.poller.register(self.pipe_r, zmq.POLLIN)

        self.logger.info("[INIT] listening for logs")
        # PING right now: the master is waiting for this message to start
        # scheduling.
        self.controler.send_multipart([b"master", b"PING"])

        try:
            self.main_loop()
        except BaseException as exc:
            self.logger.error("[EXIT] Unknown exception raised, leaving!")
            self.logger.exception(exc)

        # Close the controler socket
        self.controler.close(linger=0)
        self.poller.unregister(self.controler)

        # Carefully close the logging socket as we don't want to lose messages
        self.logger.info(
            "[EXIT] Disconnect logging socket and process messages")
        endpoint = u(self.log_socket.getsockopt(zmq.LAST_ENDPOINT))
        self.logger.debug("[EXIT] unbinding from '%s'", endpoint)
        self.log_socket.unbind(endpoint)

        # Empty the queue
        try:
            while self.wait_for_messages(True):
                # Flush test cases cache for every iteration because we might
                # get killed soon.
                self.flush_test_cases()
        except BaseException as exc:
            self.logger.error("[EXIT] Unknown exception raised, leaving!")
            self.logger.exception(exc)
        finally:
            # Last flush
            self.flush_test_cases()
            self.logger.info(
                "[EXIT] Closing the logging socket: the queue is empty")
            self.log_socket.close()
            if options["encrypt"]:
                self.auth.stop()
            context.term()
Example 32
    def handle_zap_message(self, msg):
        """Perform ZAP authentication"""
        if len(msg) < 6:
            self.log.error("Invalid ZAP message, not enough frames: %r", msg)
            if len(msg) < 2:
                self.log.error("Not enough information to reply")
            else:
                self._send_zap_reply(msg[1], b"400", b"Not enough frames")
            return

        version, request_id, domain, address, identity, mechanism = msg[:6]
        credentials = msg[6:]

        domain = u(domain, self.encoding, 'replace')
        address = u(address, self.encoding, 'replace')

        if (version != VERSION):
            self.log.error("Invalid ZAP version: %r", msg)
            self._send_zap_reply(request_id, b"400", b"Invalid version")
            return

        self.log.debug(
            "version: %r, request_id: %r, domain: %r,"
            " address: %r, identity: %r, mechanism: %r",
            version,
            request_id,
            domain,
            address,
            identity,
            mechanism,
        )

        # Is the address explicitly whitelisted or blacklisted?
        allowed = False
        denied = False
        reason = b"NO ACCESS"

        if self.whitelist:
            if address in self.whitelist:
                allowed = True
                self.log.debug("PASSED (whitelist) address=%s", address)
            else:
                denied = True
                reason = b"Address not in whitelist"
                self.log.debug("DENIED (not in whitelist) address=%s", address)

        elif self.blacklist:
            if address in self.blacklist:
                denied = True
                reason = b"Address is blacklisted"
                self.log.debug("DENIED (blacklist) address=%s", address)
            else:
                allowed = True
                self.log.debug("PASSED (not in blacklist) address=%s", address)

        # Perform authentication mechanism-specific checks if necessary
        username = u("user")
        if not denied:

            if mechanism == b'NULL' and not allowed:
                # For NULL, we allow if the address wasn't blacklisted
                self.log.debug("ALLOWED (NULL)")
                allowed = True

            elif mechanism == b'PLAIN':
                # For PLAIN, even a whitelisted address must authenticate
                if len(credentials) != 2:
                    self.log.error("Invalid PLAIN credentials: %r",
                                   credentials)
                    self._send_zap_reply(request_id, b"400",
                                         b"Invalid credentials")
                    return
                username, password = [
                    u(c, self.encoding, 'replace') for c in credentials
                ]
                allowed, reason = self._authenticate_plain(
                    domain, username, password)

            elif mechanism == b'CURVE':
                # For CURVE, even a whitelisted address must authenticate
                if len(credentials) != 1:
                    self.log.error("Invalid CURVE credentials: %r",
                                   credentials)
                    self._send_zap_reply(request_id, b"400",
                                         b"Invalid credentials")
                    return
                key = credentials[0]
                allowed, reason = self._authenticate_curve(domain, key)

            elif mechanism == b'GSSAPI':
                if len(credentials) != 1:
                    self.log.error("Invalid GSSAPI credentials: %r",
                                   credentials)
                    self._send_zap_reply(request_id, b"400",
                                         b"Invalid credentials")
                    return
                principal = u(credentials[0], 'replace')
                allowed, reason = self._authenticate_gssapi(domain, principal)

        if allowed:
            self._send_zap_reply(request_id, b"200", b"OK", username)
        else:
            self._send_zap_reply(request_id, b"400", reason)
Example 33
 def handle_recv(self, data):
     topic, msg = data
     topic_parts = u(topic).split(".")
     watcher = topic_parts[1]
     action = topic_parts[2]
     self.statsd.increment('%s.%s' % (watcher, action))
Example 34
    def handle(self, *args, **options):
        self.setup_logging("lava-publisher", options["level"],
                           options["log_file"], FORMAT)

        self.logger.info("[INIT] Dropping privileges")
        if not self.drop_privileges(options['user'], options['group']):
            self.logger.error("[INIT] Unable to drop privileges")
            return

        if not settings.EVENT_NOTIFICATION:
            self.logger.error("[INIT] 'EVENT_NOTIFICATION' is set to False, "
                              "LAVA won't generated any events")

        self.logger.info("[INIT] Creating the input socket at %s",
                         settings.INTERNAL_EVENT_SOCKET)
        context = zmq.Context.instance()
        self.pull = context.socket(zmq.PULL)
        self.pull.bind(settings.INTERNAL_EVENT_SOCKET)
        self.poller = zmq.Poller()
        self.poller.register(self.pull, zmq.POLLIN)

        # Translate signals into zmq messages
        (self.pipe_r, _) = self.setup_zmq_signal_handler()
        self.poller.register(self.pipe_r, zmq.POLLIN)

        # Create the default publishing socket
        self.logger.info("[INIT] Creating the publication socket at %s",
                         settings.EVENT_SOCKET)
        self.pub = context.socket(zmq.PUB)
        self.pub.bind(settings.EVENT_SOCKET)
        # Create the additional PUSH sockets
        if settings.EVENT_ADDITIONAL_SOCKETS:
            self.logger.info("[INIT] Creating the additional sockets:")
        self.additional_sockets = []
        for url in settings.EVENT_ADDITIONAL_SOCKETS:
            self.logger.info("[INIT]  * %s", url)
            sock = context.socket(zmq.PUSH)
            # Allow zmq to keep 10000 pending messages in each queue
            sock.setsockopt(zmq.SNDHWM, 10000)
            sock.connect(url)
            self.additional_sockets.append(sock)

        self.logger.info("[INIT] Starting the proxy")
        while self.forward_event(False):
            pass

        # Carefully close the logging socket as we don't want to lose messages
        self.logger.info("[EXIT] Disconnect pull socket and process messages")
        endpoint = u(self.pull.getsockopt(zmq.LAST_ENDPOINT))
        self.logger.debug("[EXIT] unbinding from '%s'", endpoint)
        self.pull.unbind(endpoint)

        while self.forward_event(True):
            pass

        # Close the sockets allowing 1s each to leave
        self.logger.info("[EXIT] Closing the sockets: the queue is empty")
        self.pull.close(linger=1)
        self.pub.close(linger=1)
        for socket in self.additional_sockets:
            socket.close(linger=1)
        context.term()
Example 35
"""0MQ authentication related functions and classes."""

# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import datetime
import glob
import io
import os
import zmq
from zmq.utils.strtypes import bytes, unicode, b, u

_cert_secret_banner = u("""#   ****  Generated on {0} by pyzmq  ****
#   ZeroMQ CURVE **Secret** Certificate
#   DO NOT PROVIDE THIS FILE TO OTHER USERS nor change its permissions.

""")

_cert_public_banner = u("""#   ****  Generated on {0} by pyzmq  ****
#   ZeroMQ CURVE Public Certificate
#   Exchange securely, or use a secure mechanism to verify the contents
#   of this file after exchange. Store public certificates in your home
#   directory, in the .curve subdirectory.

""")


def _write_key_file(key_filename,
                    banner,
                    public_key,
                    secret_key=None,
                    metadata=None,
                    encoding='utf-8'):
    """Create a certificate file"""
    if isinstance(public_key, bytes):
        public_key = public_key.decode(encoding)
    if isinstance(secret_key, bytes):
        secret_key = secret_key.decode(encoding)
    with io.open(key_filename, 'w', encoding='utf8') as f:
        f.write(banner.format(datetime.datetime.now()))

        f.write(u('metadata\n'))
        if metadata:
            for k, v in metadata.items():
                if isinstance(v, bytes):
                    v = v.decode(encoding)
                f.write(u("    {0} = {1}\n").format(k, v))

        f.write(u('curve\n'))
        f.write(u("    public-key = \"{0}\"\n").format(public_key))

        if secret_key:
            f.write(u("    secret-key = \"{0}\"\n").format(secret_key))
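In pyzmq this private helper is driven by the public zmq.auth.create_certificates(), which writes a matching public/secret pair. A short usage sketch; the directory and name are illustrative:

import zmq.auth

# Writes 'server.key' and 'server.key_secret' into keys_dir and
# returns both file paths.
public_file, secret_file = zmq.auth.create_certificates("keys_dir", "server")
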
Example 36
    def logging_socket(self):
        msg = self.log_socket.recv_multipart()
        try:
            (job_id, message) = (u(m) for m in msg)
        except UnicodeDecodeError:
            self.logger.error("[POLL] Invalid log message: can't be decoded")
            return
        except ValueError:
            # do not let a bad message stop the master.
            self.logger.error(
                "[POLL] failed to parse log message, skipping: %s", msg)
            return

        try:
            scanned = yaml_load(message)
        except yaml.YAMLError:
            self.logger.error("[%s] data are not valid YAML, dropping", job_id)
            return

        # Look for "results" level
        try:
            message_lvl = scanned["lvl"]
            message_msg = scanned["msg"]
        except TypeError:
            self.logger.error("[%s] not a dictionary, dropping", job_id)
            return
        except KeyError:
            self.logger.error(
                '[%s] invalid log line, missing "lvl" or "msg" keys: %s',
                job_id,
                message,
            )
            return

        # Find the handler (if available)
        if job_id not in self.jobs:
            # Query the database for the job
            try:
                job = TestJob.objects.get(id=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] unknown job id", job_id)
                return

            self.logger.info("[%s] receiving logs from a new job", job_id)
            # Create the sub directories (if needed)
            mkdir(job.output_dir)
            self.jobs[job_id] = JobHandler(job)

        # For 'event', send an event and log as 'debug'
        if message_lvl == "event":
            self.logger.debug("[%s] event: %s", job_id, message_msg)
            send_event(".event", "lavaserver", {
                "message": message_msg,
                "job": job_id
            })
            message_lvl = "debug"
        # For 'marker', save in the database and log as 'debug'
        elif message_lvl == "marker":
            # TODO: save on the file system in case of lava-logs restart
            m_type = message_msg.get("type")
            case = message_msg.get("case")
            if m_type is None or case is None:
                self.logger.error("[%s] invalid marker: %s", job_id,
                                  message_msg)
                return
            # This is in fact the previous line
            line = self.jobs[job_id].line_count() - 1
            self.jobs[job_id].markers.setdefault(case, {})[m_type] = line
            return

        # Mark the file handler as used
        self.jobs[job_id].last_usage = time.time()
        # The format is a list of dictionaries
        self.jobs[job_id].write("- %s" % message)

        if message_lvl == "results":
            try:
                job = TestJob.objects.get(pk=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] unknown job id", job_id)
                return
            meta_filename = create_metadata_store(message_msg, job)
            new_test_case = map_scanned_results(
                results=message_msg,
                job=job,
                markers=self.jobs[job_id].markers,
                meta_filename=meta_filename,
            )

            if new_test_case is None:
                self.logger.warning("[%s] unable to map scanned results: %s",
                                    job_id, message)
            else:
                self.test_cases.append(new_test_case)

            # Look for lava.job result
            if (message_msg.get("definition") == "lava"
                    and message_msg.get("case") == "job"):
                # Flush cached test cases
                self.flush_test_cases()

                if message_msg.get("result") == "pass":
                    health = TestJob.HEALTH_COMPLETE
                    health_msg = "Complete"
                else:
                    health = TestJob.HEALTH_INCOMPLETE
                    health_msg = "Incomplete"
                self.logger.info("[%s] job status: %s", job_id, health_msg)

                infrastructure_error = message_msg.get("error_type") in [
                    "Bug",
                    "Configuration",
                    "Infrastructure",
                ]
                if infrastructure_error:
                    self.logger.info("[%s] Infrastructure error", job_id)

                # Update status.
                with transaction.atomic():
                    # TODO: find a way to lock actual_device
                    job = TestJob.objects.select_for_update().get(id=job_id)
                    job.go_state_finished(health, infrastructure_error)
                    job.save()
Example 37
    def logging_socket(self):
        msg = self.log_socket.recv_multipart()
        try:
            (job_id, message) = (u(m) for m in msg)  # pylint: disable=unbalanced-tuple-unpacking
        except ValueError:
            # do not let a bad message stop the master.
            self.logger.error(
                "[POLL] failed to parse log message, skipping: %s", msg)
            return

        try:
            scanned = yaml.load(message, Loader=yaml.CLoader)
        except yaml.YAMLError:
            self.logger.error("[%s] data are not valid YAML, dropping", job_id)
            return

        # Look for "results" level
        try:
            message_lvl = scanned["lvl"]
            message_msg = scanned["msg"]
        except TypeError:
            self.logger.error("[%s] not a dictionary, dropping", job_id)
            return
        except KeyError:
            self.logger.error(
                "[%s] invalid log line, missing \"lvl\" or \"msg\" keys: %s",
                job_id, message)
            return

        # Find the handler (if available)
        if job_id not in self.jobs:
            # Query the database for the job
            try:
                job = TestJob.objects.get(id=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] unknown job id", job_id)
                return

            self.logger.info("[%s] receiving logs from a new job", job_id)
            # Create the sub directories (if needed)
            mkdir(job.output_dir)
            self.jobs[job_id] = JobHandler(job)

        if message_lvl == "results":
            try:
                job = TestJob.objects.get(pk=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] unknown job id", job_id)
                return
            meta_filename = create_metadata_store(message_msg, job)
            new_test_case = map_scanned_results(results=message_msg,
                                                job=job,
                                                meta_filename=meta_filename)
            if new_test_case is None:
                self.logger.warning("[%s] unable to map scanned results: %s",
                                    job_id, message)
            else:
                self.test_cases.append(new_test_case)

            # Look for lava.job result
            if message_msg.get("definition") == "lava" and message_msg.get(
                    "case") == "job":
                # Flush cached test cases
                self.flush_test_cases()

                if message_msg.get("result") == "pass":
                    health = TestJob.HEALTH_COMPLETE
                    health_msg = "Complete"
                else:
                    health = TestJob.HEALTH_INCOMPLETE
                    health_msg = "Incomplete"
                self.logger.info("[%s] job status: %s", job_id, health_msg)

                infrastructure_error = (message_msg.get("error_type") in [
                    "Bug", "Configuration", "Infrastructure"
                ])
                if infrastructure_error:
                    self.logger.info("[%s] Infrastructure error", job_id)

                # Update status.
                with transaction.atomic():
                    # TODO: find a way to lock actual_device
                    job = TestJob.objects.select_for_update() \
                                         .get(id=job_id)
                    job.go_state_finished(health, infrastructure_error)
                    job.save()

        # Mark the file handler as used
        self.jobs[job_id].last_usage = time.time()

        # n.b. logging here would produce a log entry for every message in every job.
        # The format is a list of dictionaries
        message = "- %s" % message

        # Write data
        self.jobs[job_id].write(message)
Example 38
    def handle_zap_message(self, msg):
        """Perform ZAP authentication"""
        if len(msg) < 6:
            self.log.error("Invalid ZAP message, not enough frames: %r", msg)
            if len(msg) < 2:
                self.log.error("Not enough information to reply")
            else:
                self._send_zap_reply(msg[1], b"400", b"Not enough frames")
            return
        
        version, request_id, domain, address, identity, mechanism = msg[:6]
        credentials = msg[6:]
        
        domain = u(domain, self.encoding, 'replace')
        address = u(address, self.encoding, 'replace')

        if (version != VERSION):
            self.log.error("Invalid ZAP version: %r", msg)
            self._send_zap_reply(request_id, b"400", b"Invalid version")
            return

        self.log.debug("version: %r, request_id: %r, domain: %r,"
                      " address: %r, identity: %r, mechanism: %r",
                      version, request_id, domain,
                      address, identity, mechanism,
        )


        # Is address is explicitly whitelisted or blacklisted?
        allowed = False
        denied = False
        reason = b"NO ACCESS"

        if self.whitelist:
            if address in self.whitelist:
                allowed = True
                self.log.debug("PASSED (whitelist) address=%s", address)
            else:
                denied = True
                reason = b"Address not in whitelist"
                self.log.debug("DENIED (not in whitelist) address=%s", address)

        elif self.blacklist:
            if address in self.blacklist:
                denied = True
                reason = b"Address is blacklisted"
                self.log.debug("DENIED (blacklist) address=%s", address)
            else:
                allowed = True
                self.log.debug("PASSED (not in blacklist) address=%s", address)

        # Perform authentication mechanism-specific checks if necessary
        username = u("user")
        if not denied:

            if mechanism == b'NULL' and not allowed:
                # For NULL, we allow if the address wasn't blacklisted
                self.log.debug("ALLOWED (NULL)")
                allowed = True

            elif mechanism == b'PLAIN':
                # For PLAIN, even a whitelisted address must authenticate
                if len(credentials) != 2:
                    self.log.error("Invalid PLAIN credentials: %r", credentials)
                    self._send_zap_reply(request_id, b"400", b"Invalid credentials")
                    return
                username, password = [ u(c, self.encoding, 'replace') for c in credentials ]
                allowed, reason = self._authenticate_plain(domain, username, password)

            elif mechanism == b'CURVE':
                # For CURVE, even a whitelisted address must authenticate
                if len(credentials) != 1:
                    self.log.error("Invalid CURVE credentials: %r", credentials)
                    self._send_zap_reply(request_id, b"400", b"Invalid credentials")
                    return
                key = credentials[0]
                allowed, reason = self._authenticate_curve(domain, key)

        if allowed:
            self._send_zap_reply(request_id, b"200", b"OK", username)
        else:
            self._send_zap_reply(request_id, b"400", reason)
Example 39
    def handle_zap_message(self, msg):
        """Perform ZAP authentication"""
        version, sequence, domain, address, identity, mechanism = msg[:6]
        domain = u(domain, self.encoding, 'replace')
        address = u(address, self.encoding, 'replace')

        if (version != b"1.0"):
            self._send_zap_reply(sequence, b"400", b"Invalid version")
            return

        self.log.debug(
            "version: %s, sequence: %s, domain: %s,"
            " address: %s, identity: %s, mechanism: %s",
            version,
            sequence,
            domain,
            address,
            identity,
            mechanism,
        )

        # Is the address explicitly whitelisted or blacklisted?
        allowed = False
        denied = False
        reason = b"NO ACCESS"

        if self.whitelist:
            if address in self.whitelist:
                allowed = True
                self.log.debug("PASSED (whitelist) address=%s", address)
            else:
                denied = True
                reason = b"Address not in whitelist"
                self.log.debug("DENIED (not in whitelist) address=%s", address)

        elif self.blacklist:
            if address in self.blacklist:
                denied = True
                reason = b"Address is blacklisted"
                self.log.debug("DENIED (blacklist) address=%s", address)
            else:
                allowed = True
                self.log.debug("PASSED (not in blacklist) address=%s", address)

        # Perform authentication mechanism-specific checks if necessary
        if not denied:

            if mechanism == b'NULL' and not allowed:
                # For NULL, we allow if the address wasn't blacklisted
                self.log.debug("ALLOWED (NULL)")
                allowed = True

            elif mechanism == b'PLAIN':
                # For PLAIN, even a whitelisted address must authenticate
                username = u(msg[6], self.encoding, 'replace')
                password = u(msg[7], self.encoding, 'replace')
                allowed, reason = self._authenticate_plain(
                    domain, username, password)

            elif mechanism == b'CURVE':
                # For CURVE, even a whitelisted address must authenticate
                key = msg[6]
                allowed, reason = self._authenticate_curve(domain, key)

        if allowed:
            self._send_zap_reply(sequence, b"200", b"OK")
        else:
            self._send_zap_reply(sequence, b"400", reason)