Example #1
	def __handle_versions(self, params):
		"""
		Parse the client's message about the plugins it knows.
		Activate and deactivate plugins accordingly.
		"""
		versions = {}
		while len(params) > 0:
			(name, params) = extract_string(params)
			(version, md5_hash) = struct.unpack('!H16s', params[:18])
			(lib, params) = extract_string(params[18:])
			p_activity = params[0]
			params = params[1:]
			versions[name] = {
				'name': name,
				'version': version,
				'hash': md5_hash,
				'activity': (p_activity == 'A'),
				'lib': lib
			}
		self.__check_versions(versions)
		now = database.now()
		def log_versions(transaction):
			logger.debug("Replacing plugin list of %s", self.cid())
			# The current state (override anything previous)
			transaction.execute('DELETE FROM active_plugins WHERE client IN (SELECT id FROM clients WHERE name = %s)', (self.cid(),))
			transaction.executemany("INSERT INTO active_plugins (client, name, updated, version, hash, libname, active) SELECT clients.id, %s, %s, %s, %s, %s, %s FROM clients WHERE clients.name = %s", map(lambda plug: (plug['name'], now, plug['version'], plug['hash'].encode('hex'), plug['lib'], plug['activity'], self.cid()), versions.values()))
			# The history, just append (yes, there may be duplicates, but who cares)
			transaction.executemany("INSERT INTO plugin_history (client, name, timestamp, version, hash, active) SELECT clients.id, %s, %s, %s, %s, %s FROM clients WHERE clients.name = %s", map(lambda plug: (plug['name'], now, plug['version'], plug['hash'].encode('hex'), plug['activity'], self.cid()), versions.values()))
			return True
		activity.push(log_versions)
Example #2
 def datagramReceived(self, dgram, addr):
     logger.trace("Packet from %s", addr)
     if len(dgram) < 13:
         logger.warn(
             "Spoof packet too short (probably a stray one), only %s bytes",
             len(dgram))
         return
     (magic, token, spoofed) = struct.unpack('!LQ?', dgram[:13])
     if magic != 0x17ACEE43:
         logger.warn(
             "Wrong magic number in spoof packet (probably a stray one)")
         return
     tok = self.__spoof.get_token(token)
     if not tok:
         logger.warn("Token %s not known", token)
         return
     if spoofed:
         tok.expect_spoofed = False
     else:
         tok.expect_ordinary = False
     if not tok.expect_spoofed and not tok.expect_ordinary:
         self.__spoof.drop_token(token)
     reactor.callInThread(store_packet, tok, spoofed, (not spoofed)
                          or (addr[0] == self.__spoof.src_addr()), addr[0],
                          database.now())
     activity.log_activity(tok.client(), 'spoof')
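
A note on the wire format: the handler above expects a fixed 13-byte header of magic, token and spoofed flag, unpacked with '!LQ?'. The sending side is not shown on this page; the following is only an illustrative sketch of the counterpart packing step (build_probe is a hypothetical helper, not code from the project):

import struct

MAGIC = 0x17ACEE43

def build_probe(token, spoofed):
    # Counterpart of the '!LQ?' unpack above: 4-byte magic, 8-byte token, 1-byte flag.
    return struct.pack('!LQ?', MAGIC, token, spoofed)

assert len(build_probe(42, True)) == 13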
Example #3
 def message_from_client(self, message, client):
     if message[0] == 'L':
         activity.log_activity(client, 'fake')
         # max_size - sizeof(14)
         if not self.__rate_limiter.check_rate(client, 1):
             logger.warn(
                 "Storing fake server log events for client %s blocked by rate limiter.",
                 client)
             return
         # maximal number of records in one message is not specified for fake plugin (as far as I know)
         # but the message shouldn't be much larger than max_size bytes (from documentation)
         # so 3 times max_size should be fine (2 times when the client reconnects + reserve - "the message may be actually larger, usually by the last event")
         if len(message[1:]) > 3 * int(self.__config['max_size']):
             logger.warn(
                 "Unexpectedly long message for fake plugin from client %s - %s bytes, max expected size %s. Ignoring.",
                 client, len(message[1:]), 3 * int(self.__config['max_size']))
             return
         reactor.callInThread(store_logs, message[1:], client,
                              database.now(), self.version(client))
     elif message[0] == 'C':
         config = struct.pack(
             '!IIIII',
             *map(lambda name: int(self.__config[name]), [
                 'version', 'max_age', 'max_size', 'max_attempts',
                 'throttle_holdback'
             ]))
         self.send('C' + config, client)
     else:
         logger.error("Unknown message from client %s: %s", client, message)
Example #4
	def message_from_client(self, message, client):
		if message[0] == 'L':
			activity.log_activity(client, 'fake')
			reactor.callInThread(store_logs, message[1:], client, database.now(), self.version(client))
		elif message[0] == 'C':
			config = struct.pack('!IIIII', *map(lambda name: int(self.__config[name]), ['version', 'max_age', 'max_size', 'max_attempts', 'throttle_holdback']))
			self.send('C' + config, client)
		else:
			logger.error("Unknown message from client %s: %s", client, message)
Example #5
	def __process(self):
		if not self.__data:
			return # No data to store.
		# As manipulation with DM might be time consuming (as it may be blocking, etc),
		# move it to a separate thread, so we don't block the communication. This is
		# safe -- we pass all the needed data to it as parameters and get rid of our
		# copy, passing the ownership to the task.
		reactor.callInThread(store_bandwidth, self.__data, database.now())
		self.__data = {}
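
The hand-off here is plain Twisted: the collected dictionary and a timestamp are passed to a worker function in the reactor's thread pool, and the attribute is immediately reset so the protocol never touches that data again. A minimal standalone sketch of the same idea, with store_bandwidth replaced by a stand-in and a literal timestamp in place of database.now():

from twisted.internet import reactor

def store_bandwidth(data, timestamp):
    # Stand-in for the real storage routine; runs in a reactor thread-pool thread.
    print("storing %d records collected at %s" % (len(data), timestamp))

data = {'client-1': (1024, 2048)}
reactor.callInThread(store_bandwidth, data, '2015-01-01 00:00:00')
data = {}  # the caller keeps nothing; ownership went to the task
reactor.callLater(1, reactor.stop)
reactor.run()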
Example #6
 def __process(self):
     if not self.__data:
         return  # No data to store.
     # As manipulation with DM might be time consuming (as it may be blocking, etc),
     # move it to a separate thread, so we don't block the communication. This is
     # safe -- we pass all the needed data to it as parameters and get rid of our
     # copy, passing the ownership to the task.
     reactor.callInThread(store_bandwidth, self.__data, database.now())
     self.__data = {}
Example #7
	def message_from_client(self, message, client):
		if message == 'C':
			logger.debug("Sending config %s to client %s", self.__config['version'], client)
			config = struct.pack('!IIIIQQ', *map(lambda name: int(self.__config[name]), ['version', 'finished_limit', 'send_limit', 'undecided_limit', 'timeout', 'max_age']))
			self.send('C' + config, client)
		elif message[0] == 'D':
			activity.log_activity(client, 'refused')
			reactor.callInThread(store_connections, message[1:], client, database.now())
		else:
			logger.error("Unknown message from client %s: %s", client, message)
Example #8
 def do_perf_v2_test(self, tasks, ip_to, ip_from, port_list):
     '''
     args:
         tasks     : multiprocessing.JoinableQueue, the task queue to consumers
         ip_to     : str, the ip address of test node
         ip_from   : str, the ip address of test node
         port_list : str, the port list generated by the Producer
     '''
     idx = sum([self.nodes_ip.index(ip) for ip in [ip_to, ip_from]]) % len(self.nodes_ip)
     port = port_list[idx]
     tasks.put(Task(kind=TaskKind.PERFV2TEST, ip=[ip_to, ip_from], port=port))
     self.db.update_top([ip_to, ip_from], TaskKind.PERFV2TEST, Result.WAIT, now())
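
The port choice is deterministic for a given pair of nodes: the indices of ip_to and ip_from in nodes_ip are summed modulo the node count and used to index port_list. A tiny worked example with made-up addresses:

nodes_ip  = ['10.0.0.1', '10.0.0.2', '10.0.0.3']
port_list = [2001, 2002, 2003]
ip_to, ip_from = '10.0.0.3', '10.0.0.1'

idx = sum(nodes_ip.index(ip) for ip in [ip_to, ip_from]) % len(nodes_ip)
print(port_list[idx])  # indices 2 + 0 = 2, and 2 % 3 = 2, so port 2003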
Example #9
 def message_from_client(self, message, client):
     if message[0] == 'L':
         activity.log_activity(client, 'fake')
         reactor.callInThread(store_logs, message[1:], client,
                              database.now(), self.version(client))
     elif message[0] == 'C':
         config = struct.pack(
             '!IIIII',
             *map(lambda name: int(self.__config[name]), [
                 'version', 'max_age', 'max_size', 'max_attempts',
                 'throttle_holdback'
             ]))
         self.send('C' + config, client)
     else:
         logger.error("Unknown message from client %s: %s", client, message)
Example #10
	def connectionLost(self, reason):
		if not self.__connected:
			return
		self.__connected = False
		if self.__logged_in:
			logger.info("Connection lost from %s", self.cid())
			self.__pinger.stop()
			self.__plugins.unregister_client(self)
			now = database.now()
			def log_plugins(transaction):
				logger.debug("Dropping plugin list of %s", self.cid())
				transaction.execute("INSERT INTO plugin_history (client, name, timestamp, active) SELECT ap.client, ap.name, %s, false FROM active_plugins AS ap JOIN clients ON ap.client = clients.id WHERE clients.name = %s", (now, self.cid()))
				transaction.execute('DELETE FROM active_plugins WHERE client IN (SELECT id FROM clients WHERE name = %s)', (self.cid(),))
				return True
			activity.push(log_plugins)
			activity.log_activity(self.cid(), "logout")
			self.transport.abortConnection()
Example #11
    def __handle_versions(self, params):
        """
		Parse the client's message about the plugins it knows.
		Activate and deactivate plugins accordingly.
		"""
        versions = {}
        while len(params) > 0:
            (name, params) = extract_string(params)
            (version, md5_hash) = struct.unpack('!H16s', params[:18])
            (lib, params) = extract_string(params[18:])
            p_activity = params[0]
            params = params[1:]
            versions[name] = {
                'name': name,
                'version': version,
                'hash': md5_hash,
                'activity': (p_activity == 'A'),
                'lib': lib
            }
        self.__check_versions(versions)
        now = database.now()

        def log_versions(transaction):
            logger.debug("Replacing plugin list of %s", self.cid())
            # The current state (override anything previous)
            transaction.execute(
                'DELETE FROM active_plugins WHERE client IN (SELECT id FROM clients WHERE name = %s)',
                (self.cid(), ))
            transaction.executemany(
                "INSERT INTO active_plugins (client, name, updated, version, hash, libname, active) SELECT clients.id, %s, %s, %s, %s, %s, %s FROM clients WHERE clients.name = %s",
                map(
                    lambda plug:
                    (plug['name'], now, plug['version'], plug['hash'].encode(
                        'hex'), plug['lib'], plug['activity'], self.cid()),
                    versions.values()))
            # The history, just append (yes, there may be duplicates, but who cares)
            transaction.executemany(
                "INSERT INTO plugin_history (client, name, timestamp, version, hash, active) SELECT clients.id, %s, %s, %s, %s, %s FROM clients WHERE clients.name = %s",
                map(
                    lambda plug: (plug['name'], now, plug['version'], plug[
                        'hash'].encode('hex'), plug['activity'], self.cid()),
                    versions.values()))
            return True

        activity.push(log_versions)
Example #12
 def message_from_client(self, message, client):
     if message[0] == 'C':
         logger.debug('Sending config to %s', client)
         if self.version(client) < 2:
             self.send(self.__build_config(''), client)
         else:
             self.send(self.__build_config('-diff'), client)
             for a in self._addresses:
                 self.send(
                     self.__build_filter_version(a, self._addresses[a][0],
                                                 self._addresses[a][1]),
                     client)
     elif message[0] == 'D':
         logger.debug('Flows from %s', client)
         activity.log_activity(client, 'flow')
         reactor.callInThread(store_flows, client, message[1:],
                              int(self._conf['version']), database.now())
     elif message[0] == 'U':
         self._provide_diff(message[1:], client)
Example #13
    def __process_keys(self):
        """
		Extract the keys aggregated in groups and send them to storage.
		"""
        self.__aggregating_keys = False
        if not self.__have_keys:
            # We are done with NOP now.
            self.__background_processing = False
            return

        def done(result):
            self.__background_processing = False
            # A Failure instance arrives here when the threaded call raised.
            if isinstance(result, Failure):
                logger.error("Failed to push anomalies to DB: %s",
                             result.value)

        deferred = threads.deferToThread(store_keys, self.__groups,
                                         database.now())
        deferred.addBoth(done)  # run on success and on failure alike
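
The same push-to-thread-and-report pattern, reduced to a self-contained sketch: deferToThread runs the blocking write in the reactor thread pool, and addBoth routes both the result and a Failure back to one callback. store_keys and the literal timestamp below are stand-ins, not code from this project:

from twisted.internet import reactor, threads
from twisted.python.failure import Failure

def store_keys(groups, timestamp):
    # Stand-in for the blocking DB write; raise here to exercise the error branch.
    return len(groups)

def done(result):
    if isinstance(result, Failure):
        print("Failed to push anomalies to DB:", result.value)
    else:
        print("Stored", result, "groups")
    reactor.stop()

threads.deferToThread(store_keys, {'group-a': []}, '2015-01-01').addBoth(done)
reactor.run()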
Example #14
    def connectionLost(self, reason):
        if not self.__connected:
            return
        self.__connected = False
        if self.__logged_in:
            logger.info("Connection lost from %s", self.cid())
            self.__pinger.stop()
            self.__plugins.unregister_client(self)
            now = database.now()

            def log_plugins(transaction):
                logger.debug("Dropping plugin list of %s", self.cid())
                transaction.execute(
                    "INSERT INTO plugin_history (client, name, timestamp, active) SELECT ap.client, ap.name, %s, false FROM active_plugins AS ap JOIN clients ON ap.client = clients.id WHERE clients.name = %s",
                    (now, self.cid()))
                transaction.execute(
                    'DELETE FROM active_plugins WHERE client IN (SELECT id FROM clients WHERE name = %s)',
                    (self.cid(), ))
                return True

            activity.push(log_plugins)
            activity.log_activity(self.cid(), "logout")
            self.transport.abortConnection()
Example #15
 def message_from_client(self, message, client):
     if message == 'C':
         logger.debug("Sending config %s to client %s",
                      self.__config['version'], client)
         config = struct.pack(
             '!IIIIQQ',
             *map(lambda name: int(self.__config[name]), [
                 'version', 'finished_limit', 'send_limit',
                 'undecided_limit', 'timeout', 'max_age'
             ]))
         self.send('C' + config, client)
     elif message[0] == 'D':
         activity.log_activity(client, 'refused')
         if not self.__rate_limiter.check_rate(client, 1):
             logger.warn(
                 "Storing refused connections for client %s blocked by rate limiter.",
                 client)
             return
         # the limit for the number of records in a message is 2*send_limit because the client may buffer up to two times the number if he disconnects/reconnects
         reactor.callInThread(store_connections,
                              2 * int(self.__config['send_limit']),
                              message[1:], client, database.now())
     else:
         logger.error("Unknown message from client %s: %s", client, message)
Example #16
 def success(self, client, payload):
     reactor.callInThread(store_certs, client, payload, self.__hosts,
                          self.__batch_time, database.now())
     log_activity(client, 'certs')
Example #17
 def success(self, client, payload):
     reactor.callInThread(submit_data, client, payload, self.__hosts,
                          self.__batch_time, database.now())
     log_activity(client, 'pings')
Example #18
	def message_from_client(self, message, client):
		if message[0] == 'C':
			if client in self.__delayed_config:
				# The client asks for a second time in a short while. Don't send anything now, but do send it a bit later
				logger.info('Delaying config for %s', client)
				self.__delayed_config[client] = True
				return
			logger.debug('Sending config to %s', client)
			self.__delayed_config[client] = False # We know about the client, but it hasn't asked twice yet.
			if self.version(client) < 2:
				self.send(self.__build_config(''), client)
			else:
				self.send(self.__build_config('-diff'), client)
				for a in self._addresses:
					self.send(self.__build_filter_version(a, self._addresses[a][0], self._addresses[a][1]), client)
		elif message[0] == 'D':
			logger.debug('Flows from %s', client)
			activity.log_activity(client, 'flow')
			if not self.__rate_limiter.check_rate(client, 1):
				logger.warn("Storing flows for client %s blocked by rate limiter.", client)
				return
			# the limit for the number of records in a message is 2*max_flows because the client may buffer up to two times the number if he disconnects/reconnects
			reactor.callInThread(store_flows, 2 * int(self._conf['max_flows']), client, message[1:], int(self._conf['version']), database.now())
		elif message[0] == 'U':
			self._provide_diff(message[1:], client)
Example #19
	def message_from_client(self, message, client):
		if message[0] == 'C':
			logger.debug('Sending config to %s', client)
			if self.version(client) < 2:
				self.send(self.__build_config(''), client)
			else:
				self.send(self.__build_config('-diff'), client)
				for a in self._addresses:
					self.send(self.__build_filter_version(a, self._addresses[a][0], self._addresses[a][1]), client)
		elif message[0] == 'D':
			logger.debug('Flows from %s', client)
			activity.log_activity(client, 'flow')
			reactor.callInThread(store_flows, client, message[1:], int(self._conf['version']), database.now())
		elif message[0] == 'U':
			self._provide_diff(message[1:], client)
Example #20
	def message_from_client(self, message, client):
		if message == 'C':
			logger.debug("Sending config %s to client %s", self.__config['version'], client)
			config = struct.pack('!IIIIQQ', *map(lambda name: int(self.__config[name]), ['version', 'finished_limit', 'send_limit', 'undecided_limit', 'timeout', 'max_age']))
			self.send('C' + config, client)
		elif message[0] == 'D':
			activity.log_activity(client, 'refused')
			if not self.__rate_limiter.check_rate(client, 1):
				logger.warn("Storing refused connections for client %s blocked by rate limiter.", client)
				return
			# the limit for the number of records in a message is 2*send_limit because the client may buffer up to two times the number if he disconnects/reconnects
			reactor.callInThread(store_connections, 2 * int(self.__config['send_limit']), message[1:], client, database.now())
		else:
			logger.error("Unknown message from client %s: %s", client, message)
Example #21
 def signal_handler(*args):
     for c in consumers:
         os.kill(c.pid, signal.SIGTERM)
     db.update_info(-1, end=now())
     sys.exit()
Example #22
    def run(self):
        # Store pid, date into DataBase
        self.db.update_info(self.pid, start=now())

        # Init node info
        nodes_info = {}
        for ip in self.nodes_ip:
            nodes_info[ip] = NodeInfo(ip)

        # Establish communication queues
        tasks = multiprocessing.JoinableQueue()
        results = multiprocessing.Queue()

        # Create result path
        remove_dir(self.result_path)
        make_dir(self.result_path)
        for kind in TaskKind:
            make_dir(os.path.join(self.result_path, kind.value))

        # Start consumers
        print('Creating {} consumers'.format(self.num_consumers))
        consumers = [
            Consumer(tasks, results, self.result_path, self.target_path)
            for _ in range(self.num_consumers)
        ]

        for w in consumers:
            w.daemon = True
            w.start()
        

        db = self.db
        def signal_handler(*args):
            for c in consumers:
                os.kill(c.pid, signal.SIGTERM)
            db.update_info(-1, end=now())
            sys.exit()
        signal.signal(signal.SIGTERM, signal_handler)

        # Start Time
        print("Test Start at {}".format(now()))

        # Number of tasks that have been enqueued
        ntasks = 0

        # Enqueue the first task for every node - "no password check"
        for ip in self.nodes_ip:
            tasks.put(Task(kind=TaskKind.NOPWCHECK, ip=ip))
            nodes_info[ip].status = TaskKind.NOPWCHECK
            # +2 because there is ACCEPT and SUCC -> two results
            ntasks += 2
            self.db.update_top(ip, TaskKind.NOPWCHECK, Result.WAIT, now())
        
        conn_check_waiting_list = []
        
        # Generate port List for ucx test
        # [2001, 2002, ..., 2000+len(nodes_ip)]
        port_list = [2001+i for i in range(0, len(self.nodes_ip))]
        

        while ntasks > 0:
            result = results.get()
            ntasks -= 1

            print('Result: {}'.format(result))

            if result.code == Result.SUCC:
                # enqueue more task
                if result.kind == TaskKind.NOPWCHECK:
                    tasks.put(Task(kind=TaskKind.ENVCHECK, ip=result.ip))
                    nodes_info[result.ip].status = TaskKind.ENVCHECK
                    ntasks += 2
                    self.db.update_top(result.ip, TaskKind.ENVCHECK, Result.WAIT, now())

                elif result.kind == TaskKind.ENVCHECK:
                    tasks.put(Task(kind=TaskKind.SETUP, ip=result.ip))
                    nodes_info[result.ip].status = TaskKind.SETUP
                    ntasks += 2
                    self.db.update_top(result.ip, TaskKind.SETUP, Result.WAIT, now())

                elif result.kind == TaskKind.SETUP:
                    self.db.delete_top(result.ip)
                    for ip in conn_check_waiting_list:
                        ip1, ip2 = (result.ip, ip) if IPAddress(result.ip) < IPAddress(ip) else (ip, result.ip)
                        tasks.put(Task(kind=TaskKind.CONNCHECK, ip=[ip1, ip2]))
                        ntasks += 2
                        self.db.update_top([ip1, ip2], TaskKind.CONNCHECK, Result.WAIT, now())
                    conn_check_waiting_list.append(result.ip)
                    nodes_info[result.ip].status = TaskKind.CONNCHECK
                
                elif result.kind == TaskKind.CONNCHECK:
                    self.db.delete_top(result.ip)
                    ip1 = result.ip[0]
                    ip2 = result.ip[1]

                    if self.same_cluster(ip1, ip2):
                        continue

                    if nodes_info[ip1].occupied is False and \
                        nodes_info[ip2].occupied is False:
                        # both ips are available
                        self.do_ucx_test(tasks, ip1, ip2, port_list)
                        ntasks += 2
                        
                        # Occupied
                        nodes_info[ip1].occupied = True
                        nodes_info[ip2].occupied = True
                    else:
                        # at least one ip is not available
                        nodes_info[ip1].dep_list.append(ip2)
                        nodes_info[ip2].dep_list.append(ip1)

                elif result.kind == TaskKind.UCXTEST:
                    self.db.delete_top(result.ip)

                    ip1 = result.ip[0]
                    ip2 = result.ip[1]
                    self.handle_ucx_test_result(result)
                    
                    # Just go ahead and do next two-IPs test
                    assert nodes_info[ip1].occupied == True
                    assert nodes_info[ip2].occupied == True

                    # Do perf_v2_test
                    self.do_perf_v2_test(tasks, ip1, ip2, port_list)
                    ntasks += 2

                    # Maybe have to enqueue more tasks
                elif result.kind == TaskKind.PERFV2TEST:
                    self.db.delete_top(result.ip)
                    self.handle_perf_v2_test_result(result)

                    nodes_info[result.ip[0]].occupied = False
                    nodes_info[result.ip[1]].occupied = False

                    # Find dependencies and enqueue new UCX tasks
                    for ipx in result.ip:
                        if nodes_info[ipx].occupied is True: continue
                        for ipy in nodes_info[ipx].dep_list :
                            if nodes_info[ipy].occupied is False:
                                ip1, ip2 = (ipx, ipy) if IPAddress(ipx) < IPAddress(ipy) else (ipy, ipx)
                                self.do_ucx_test(tasks, ip1, ip2, port_list)
                                ntasks += 2
                                
                                # Occupied
                                nodes_info[ip1].occupied = True
                                nodes_info[ip2].occupied = True

                                # Remove from List
                                nodes_info[ip1].dep_list.remove(ip2)
                                nodes_info[ip2].dep_list.remove(ip1)

                                break


            elif result.code == Result.FAILED:
                if type(result.ip) == str:
                    nodes_info[result.ip].status = Result.FAILED
                    self.db.update_top(result.ip, result.kind, Result.FAILED, now())
                else:
                    assert type(result.ip) == list
                    assert len(result.ip) == 2
                    nodes_info[result.ip[0]].status = Result.FAILED
                    nodes_info[result.ip[1]].status = Result.FAILED
                    self.db.update_top(result.ip, result.kind, Result.FAILED, now())

                    # release
                    nodes_info[result.ip[0]].occupied = False
                    nodes_info[result.ip[1]].occupied = False

            elif result.code == Result.ACCEPT:
                # Update database
                self.db.update_top(result.ip, result.kind, Result.ACCEPT, now())
            

        # CleanUp for every remote machine
        for ip in self.nodes_ip:
            tasks.put(Task(kind=TaskKind.CLEAN, ip=ip))
            ntasks += 2
            # Do not show CLEAN phase (it will overwrite previous failure information)
            #self.db.update_top(ip, TaskKind.CLEAN, Result.WAIT, now())

        while ntasks > 0:
            result = results.get()
            ntasks -= 1

            print('Result: {}'.format(result))
            if result.kind == TaskKind.CLEAN:
                #self.db.delete_top(result.ip)
                if result.code == Result.FAILED:
                    assert type(result.ip) == str, "CLEAN IP: {}".format(result.ip)
                    #self.db.update_top(result.ip, result.kind, Result.FAILED, now())
                    print("{} CLEAN FAILED".format(result.ip))
                #elif result.code == Result.ACCEPT:
                    #self.db.update_top(result.ip, result.kind, Result.ACCEPT, now())
            else:
                raise Exception("Clean must be after all tasks finished")
        
        # Wait for all of the tasks to finish
        tasks.join()

        # Add a poison pill for each consumer
        for _ in range(self.num_consumers):
            tasks.put(None)

        self.db.update_info(-1, end=now())
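
The surrounding machinery in run() is the usual multiprocessing producer/consumer setup: a JoinableQueue of tasks, a plain Queue of results, and one None "poison pill" per consumer to shut the workers down. A stripped-down sketch of just that skeleton, with plain strings standing in for the Task/Result objects used above:

import multiprocessing

def consumer(tasks, results):
    while True:
        task = tasks.get()
        if task is None:        # poison pill -> exit
            tasks.task_done()
            break
        results.put('done: ' + task)
        tasks.task_done()

if __name__ == '__main__':
    tasks = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    consumers = [multiprocessing.Process(target=consumer, args=(tasks, results))
                 for _ in range(2)]
    for w in consumers:
        w.daemon = True
        w.start()

    jobs = ['check-a', 'check-b', 'check-c']
    for job in jobs:
        tasks.put(job)
    for _ in consumers:         # one pill per consumer
        tasks.put(None)

    tasks.join()                # every put() has been matched by a task_done()
    for _ in jobs:
        print(results.get())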
Example #23
	def success(self, client, payload):
		reactor.callInThread(store_certs, client, payload, self.__hosts, self.__batch_time, database.now())
		log_activity(client, 'certs')
Example #24
def printutf8(s, ofile=sys.stdout.buffer):
    outs = database.now() + " " + str(s) + "\n"
    ofile.write(outs.encode('utf-8'))
    ofile.flush()
Example #25
	def datagramReceived(self, dgram, addr):
		logger.trace("Packet from %s", addr)
		if len(dgram) < 13:
			logger.warn("Spoof packet too short (probably a stray one), only %s bytes", len(dgram))
			return
		(magic, token, spoofed) = struct.unpack('!LQ?', dgram[:13])
		if magic != 0x17ACEE43:
			logger.warn("Wrong magic number in spoof packet (probably a stray one)")
			return
		tok = self.__spoof.get_token(token)
		if not tok:
			logger.warn("Token %s not known", token)
			return
		if spoofed:
			tok.expect_spoofed = False
		else:
			tok.expect_ordinary = False
		if not tok.expect_spoofed and not tok.expect_ordinary:
			self.__spoof.drop_token(token)
		reactor.callInThread(store_packet, tok, spoofed, (not spoofed) or (addr[0] == self.__spoof.src_addr()), addr[0], database.now())
		activity.log_activity(tok.client(), 'spoof')