Example #1
	def updateCollection(self, key, doc):
		# bindata will reduce id size by 50%
		#self.collection.update({ "_id": bson.binary.Binary(key) }, doc, True)
		# upsert the aggregated flow document; update(spec, doc, True) is the
		# legacy pymongo spelling of an upsert
		self.collection.update({ "_id": key }, doc, True)
		if self.nodes_collection:
			# flatten $set and $inc into one plain document for the node index;
			# copy $set first so the caller's doc is not mutated
			newdoc = dict(doc["$set"])
			newdoc.update(doc["$inc"])
			common.update_node_index(newdoc, self.nodes_collection, config.flow_aggr_sums)
		if self.ports_collection:
			newdoc = dict(doc["$set"])
			newdoc.update(doc["$inc"])
			common.update_port_index(newdoc, self.ports_collection, config.flow_aggr_sums, known_ports)
		self.db_requests += 1
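
As a usage sketch (not the project's actual caller), the method expects one MongoDB update document combining $set and $inc, applied as an upsert keyed by _id. The connection string, database, collection, and key below are assumptions for illustration; the sketch uses pymongo's current update_one API, of which the snippet's legacy collection.update(spec, doc, True) is the pre-3.0 spelling.

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # assumed connection string
collection = client["flows"]["flow_aggr"]          # hypothetical db/collection

key = "192.0.2.1-443"  # hypothetical aggregation key
doc = {
    "$set": {"srcIP": "192.0.2.1", "dstPort": 443},  # last-seen attributes
    "$inc": {"pkts": 12, "bytes": 3400},             # running counters
}
# upsert: insert a new document for an unseen key, else apply the operators
collection.update_one({"_id": key}, doc, upsert=True)
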
Example #2
def updateCollection(self, key, doc):
    # bindata will reduce id size by 50%
    #self.collection.update({ "_id": bson.binary.Binary(key) }, doc, True)
    # upsert the aggregated flow document (legacy pymongo upsert signature)
    self.collection.update({"_id": key}, doc, True)
    if self.nodes_collection:
        # flatten $set and $inc into one plain document for the node index;
        # copy $set first so the caller's doc is not mutated
        newdoc = dict(doc["$set"])
        newdoc.update(doc["$inc"])
        common.update_node_index(newdoc, self.nodes_collection,
                                 config.flow_aggr_sums)
    if self.ports_collection:
        newdoc = dict(doc["$set"])
        newdoc.update(doc["$inc"])
        common.update_port_index(newdoc, self.ports_collection,
                                 config.flow_aggr_sums, known_ports)
    self.db_requests += 1
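
Both variants flatten the update operators into one plain document before calling the index helpers. Isolated as a standalone function (the name and sample data are hypothetical, not project API):

def flatten_update_doc(doc):
    """Merge the $set and $inc sections of a MongoDB update document into
    a single flat dict without mutating the input (illustrative sketch)."""
    flat = dict(doc.get("$set", {}))
    flat.update(doc.get("$inc", {}))
    return flat

doc = {"$set": {"srcIP": "192.0.2.1"}, "$inc": {"bytes": 3400}}
print(flatten_update_doc(doc))  # {'srcIP': '192.0.2.1', 'bytes': 3400}
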
Example #3
				# store the current flow time stamp as beginning time
				time_since_last_flush = obj[common.COL_LAST_SWITCHED]
		except ValueError, e:
			print >> sys.stderr, "Could not decode JSON object in queue: ", e
			continue

		# only import the flow if it is no older than config.max_flow_age
		if config.max_flow_age != 0 and obj[common.COL_FIRST_SWITCHED] < (time.mktime(datetime.datetime.utcfromtimestamp(time.time()).timetuple()) - config.max_flow_age):
			print "Flow is too old to be imported into mongodb. Skipping flow ..."
			continue

		# Bucket slicing
		for handler in handlers:
			handler.handleFlow(obj)

		common.update_node_index(obj, node_index_collection, config.flow_aggr_sums)
		common.update_port_index(obj, port_index_collection, config.flow_aggr_sums, known_ports)

		output_flows += 1

		if config.live_import:
			# try to periodically flush the caches.
			# do this every 100,000 flows or every five minutes based
			# on the timestamps we get in the flow data
			if output_flows % 100000 == 0 or obj[common.COL_LAST_SWITCHED] > (time_since_last_flush + 300):
				time_since_last_flush = obj[common.COL_LAST_SWITCHED]
				print "Live import. Flushing caches ..."
				for handler in handlers:
					handler.flushCache()
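
The age filter is the densest line above, so here it is isolated into a small helper (the function name is hypothetical). Note the original derives "now" by round-tripping time.time() through a UTC datetime and time.mktime(); on a host whose local timezone is UTC this reduces to time.time() itself.

import time
import datetime

def flow_is_too_old(first_switched, max_flow_age):
    """Sketch of the snippet's age check: True if the flow started more than
    max_flow_age seconds ago; max_flow_age == 0 disables the filter."""
    if max_flow_age == 0:
        return False
    now = time.mktime(datetime.datetime.utcfromtimestamp(time.time()).timetuple())
    return first_switched < now - max_flow_age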

Example #4
            print >> sys.stderr, "Could not decode JSON object in queue: ", e
            continue

        # only import the flow if it is no older than config.max_flow_age
        if config.max_flow_age != 0 and obj[common.COL_FIRST_SWITCHED] < (
                time.mktime(
                    datetime.datetime.utcfromtimestamp(
                        time.time()).timetuple()) - config.max_flow_age):
            print "Flow is too old to be imported into mongodb. Skipping flow ..."
            continue

        # Bucket slicing
        for handler in handlers:
            handler.handleFlow(obj)

        common.update_node_index(obj, node_index_collection,
                                 config.flow_aggr_sums)
        common.update_port_index(obj, port_index_collection,
                                 config.flow_aggr_sums, known_ports)

        output_flows += 1

        if config.live_import:
            # try to periodically flush the caches.
            # do this every 100,000 flows or every five minutes based
            # on the timestamps we get in the flow data
            if output_flows % 100000 == 0 or obj[common.COL_LAST_SWITCHED] > (
                    time_since_last_flush + 300):
                time_since_last_flush = obj[common.COL_LAST_SWITCHED]
                print "Live import. Flushing caches ..."
                for handler in handlers:
                    handler.flushCache()
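
The flush throttle triggers on either a flow-count boundary or 300 seconds of elapsed flow time, and it deliberately measures time with the timestamps carried in the flow records rather than the wall clock, so replaying historical data still flushes at the data's own pace. A sketch with the constants named (names are hypothetical):

FLUSH_EVERY_N_FLOWS = 100000  # the snippet's flow-count boundary
FLUSH_INTERVAL = 300          # seconds of flow time between flushes

def should_flush(output_flows, last_switched, time_since_last_flush):
    """True when the caches should be flushed: every FLUSH_EVERY_N_FLOWS
    imported flows, or once flow timestamps have advanced FLUSH_INTERVAL
    seconds past the last flush (illustrative sketch, not project API)."""
    return (output_flows % FLUSH_EVERY_N_FLOWS == 0
            or last_switched > time_since_last_flush + FLUSH_INTERVAL)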