def set_up_policy(self, group_by):
    """Build the initial policy pipeline and hook up basic callbacks.

    The pipeline composes a LimitFilter (limit 1 per grouping) with a
    FwdBucket, so the bucket only ever sees the first packet of each
    new grouping.
    """
    self.bucket_policies = []
    first_packet_bucket = FwdBucket()
    grouping = LimitFilter(1, group_by)
    # After a grouping's first packet is delivered, the filter stops
    # matching that grouping and a dedicated count bucket is created.
    first_packet_bucket.register_callback(grouping.update_policy)
    first_packet_bucket.register_callback(self.init_countbucket)
    self.groupby_filter = grouping
    self.fb = first_packet_bucket
    super(counts, self).__init__(grouping >> first_packet_bucket)
def __init__(self, limit=None, group_by=None):
    """Set up a FwdBucket, optionally preceded by a per-grouping LimitFilter.

    :param limit: number of packets to match per grouping (None = unlimited).
    :param group_by: header fields that define a grouping.
    """
    # Normalize None to a fresh list instead of using a mutable default
    # argument, which would be shared across all instances.
    group_by = [] if group_by is None else group_by
    self.fb = FwdBucket()
    # Callers register packet callbacks directly on the underlying bucket.
    self.register_callback = self.fb.register_callback
    if limit is None:
        # No limit: every matching packet goes straight to the bucket.
        super(packets, self).__init__(self.fb)
    else:
        # The filter must learn of each delivered packet so it can stop
        # matching a grouping once that grouping's limit is reached.
        self.limit_filter = LimitFilter(limit, group_by)
        self.fb.register_callback(self.limit_filter.update_policy)
        super(packets, self).__init__(self.limit_filter >> self.fb)
def set_up_policy(self, group_by):
    """Construct the query's policy and register its bookkeeping callbacks."""
    self.bucket_policies = []
    # Match only the first packet of every distinct grouping...
    self.groupby_filter = LimitFilter(1, group_by)
    # ...and deliver that packet to the controller via a forwarding bucket.
    self.fb = FwdBucket()
    for hook in (self.groupby_filter.update_policy, self.init_countbucket):
        self.fb.register_callback(hook)
    super(counts, self).__init__(self.groupby_filter >> self.fb)
def __init__(self, interval, group_by=None):
    """Bucket that periodically reports an aggregate count to its callbacks.

    :param interval: seconds between successive reports.
    :param group_by: header fields defining groupings; when non-empty the
        aggregate is a dict keyed by grouping, otherwise a scalar counter.
    """
    FwdBucket.__init__(self)
    self.interval = interval
    # Normalize None instead of using a mutable default argument.
    self.group_by = [] if group_by is None else group_by
    if self.group_by:
        self.aggregate = {}
    else:
        self.aggregate = 0
    def report_count():
        # Read self.aggregate on every tick rather than passing it as a
        # thread argument: the scalar counter is an immutable int that is
        # rebound on update, so a value captured at thread start would
        # report the initial 0 forever.
        while True:
            for callback in self.callbacks:
                callback(self.aggregate)
            time.sleep(self.interval)
    self.query_thread = Thread(target=report_count)
    # Daemon thread so the reporter never blocks interpreter shutdown.
    self.query_thread.daemon = True
    self.query_thread.start()
def __init__(self, limit=None, group_by=[]):
    """Forwarding bucket, optionally gated by a per-grouping packet limit."""
    bucket = FwdBucket()
    self.fb = bucket
    self.register_callback = bucket.register_callback
    if limit is not None:
        # Gate the bucket behind a limit filter, and keep the filter
        # informed of every packet the bucket delivers.
        gate = LimitFilter(limit, group_by)
        bucket.register_callback(gate.update_policy)
        self.limit_filter = gate
        super(packets, self).__init__(gate >> bucket)
    else:
        super(packets, self).__init__(bucket)
class packets(DerivedPolicy):
    """A FwdBucket preceded by a LimitFilter.

    :param limit: the number of packets to be matched in each grouping
        (None means no limit).
    :type limit: int
    :param group_by: the fields by which to group packets.
    :type group_by: string list
    """
    def __init__(self, limit=None, group_by=None):
        # Normalize here instead of using a mutable default argument,
        # which would be shared across every instance of the class.
        group_by = [] if group_by is None else group_by
        self.fb = FwdBucket()
        # Callers register packet callbacks directly on the underlying bucket.
        self.register_callback = self.fb.register_callback
        if limit is None:
            super(packets, self).__init__(self.fb)
        else:
            # Filter packets per grouping before they reach the bucket;
            # the filter listens to delivered packets so it can stop
            # matching a grouping once its limit is hit.
            self.limit_filter = LimitFilter(limit, group_by)
            self.fb.register_callback(self.limit_filter.update_policy)
            super(packets, self).__init__(self.limit_filter >> self.fb)

    def __repr__(self):
        return "packets\n%s" % repr(self.policy)
class packets(DerivedPolicy):
    """Query policy that delivers packets to callbacks, optionally
    limited per grouping by a LimitFilter in front of a FwdBucket.

    :param limit: number of packets to match in each grouping.
    :type limit: int
    :param group_by: header fields that define a grouping.
    :type group_by: string list
    """
    def __init__(self, limit=None, group_by=[]):
        bucket = FwdBucket()
        self.fb = bucket
        self.register_callback = bucket.register_callback
        if limit is not None:
            gate = LimitFilter(limit, group_by)
            # Keep the filter updated with every packet the bucket delivers.
            bucket.register_callback(gate.update_policy)
            self.limit_filter = gate
            super(packets, self).__init__(gate >> bucket)
        else:
            super(packets, self).__init__(bucket)

    def __repr__(self):
        return "packets\n%s" % repr(self.policy)
class counts(DynamicPolicy):
    """A CountBucket that returns distinct counts per grouping, defined
    by a set of header fields.

    :param interval: time period between successive pulls of switch statistics
    :type interval: some float
    :param group_by: list of grouping fields
    :type group_by: string list
    """
    def __init__(self, interval=None, group_by=[]):
        # NOTE(review): mutable default for group_by — safe only if
        # LimitFilter never mutates the list it receives; confirm.
        self.set_up_policy(group_by)
        self.set_up_stats()
        self.set_up_polling(interval)

    def set_up_policy(self, group_by):
        """Setup policy structure and basic callbacks."""
        self.bucket_policies = []
        self.groupby_filter = LimitFilter(1, group_by)
        self.fb = FwdBucket()  # fb sees first packet of each new grouping
        self.fb.register_callback(self.groupby_filter.update_policy)
        self.fb.register_callback(self.init_countbucket)
        super(counts, self).__init__(self.groupby_filter >> self.fb)

    def set_up_stats(self):
        """Setup for pulling stats and related book-keeping."""
        self.callbacks = []
        # Maps each grouping predicate to its dedicated CountBucket.
        self.bucket_dict = {}
        # Accumulates per-grouping counts for the current polling round.
        self.reported_counts = {}
        # Predicates whose buckets were queried but have not yet reported.
        self.queried_preds = set([])
        # Guards queried_preds and reported_counts across the polling
        # thread and bucket-callback threads.
        self.queried_preds_lock = Lock()

    def set_up_polling(self, interval):
        """Setup polling of stats from switches every `interval` seconds.
        If interval is None, the application needs to call pull_stats
        directly."""
        if interval:
            self.qt = Thread(target=self.query_thread, args=(interval,))
            self.qt.daemon = True
            self.qt.start()

    def query_thread(self, interval):
        """Thread that calls pull_stats every `interval` seconds."""
        time.sleep(interval)
        while True:
            self.pull_stats()
            time.sleep(interval)

    def init_countbucket(self, pkt):
        """When a packet from a previously unseen grouping arrives, set up
        new count buckets for the same.
        """
        pred = self.groupby_filter.get_pred_from_pkt(pkt)
        cb = CountBucket()
        cb.register_callback(self.collect_pred(pred))
        self.bucket_policies.append(pred >> cb)
        self.bucket_dict[pred] = cb
        # Send the current packet to the new countbucket
        cb.eval(pkt)
        cb.apply()
        # In future, send all packets of this grouping directly to this bucket
        self.policy = (self.groupby_filter >> self.fb) + union(self.bucket_policies)

    def collect_pred(self, pred):
        """Return a callback function specific to each grouping predicate."""
        def collect(pkt_byte_counts):
            # Record the count under the predicate's field map and mark
            # this predicate as reported for the current round.
            with self.queried_preds_lock:
                if pred in self.queried_preds:
                    self.reported_counts[pred.map] = pkt_byte_counts
                    self.queried_preds.remove(pred)
                    self.call_callbacks()
        return collect

    def call_callbacks(self):
        """Check if all queried buckets have returned, and if so, call the
        callbacks.
        """
        if not self.queried_preds:
            for f in self.callbacks:
                f(self.reported_counts)

    def register_callback(self, fn):
        # fn receives the full reported_counts dict once each polling
        # round completes.
        self.callbacks.append(fn)

    def pull_stats(self):
        """Pulls statistics from the switches corresponding to all groupings."""
        buckets_list = []
        with self.queried_preds_lock:
            # Snapshot the groupings being queried in this round.
            self.queried_preds = set(copy.deepcopy(self.bucket_dict.keys()))
            self.reported_counts = {}
            for pred in self.queried_preds:
                buckets_list.append(self.bucket_dict[pred])
        # Calling pull_stats while holding the lock creates a potential deadlock
        for bucket in buckets_list:
            bucket.pull_stats()
        # call callbacks in case no preds were queried in the first place
        self.call_callbacks()

    def __repr__(self):
        return "counts\n%s" % repr(self.policy)
class counts(DynamicPolicy):
    """A CountBucket that returns distinct counts per grouping, defined
    by a set of header fields.

    :param interval: time period between successive pulls of switch statistics
    :type interval: some float
    :param group_by: list of grouping fields
    :type group_by: string list
    """
    def __init__(self, interval=None, group_by=None):
        # Normalize here instead of using a mutable default argument.
        self.set_up_policy([] if group_by is None else group_by)
        self.set_up_stats()
        self.set_up_polling(interval)

    def set_up_policy(self, group_by):
        """Setup policy structure and basic callbacks."""
        self.bucket_policies = []
        self.groupby_filter = LimitFilter(1, group_by)
        self.fb = FwdBucket()  # fb sees first packet of each new grouping
        self.fb.register_callback(self.groupby_filter.update_policy)
        self.fb.register_callback(self.init_countbucket)
        super(counts, self).__init__(self.groupby_filter >> self.fb)

    def set_up_stats(self):
        """Setup for pulling stats and related book-keeping."""
        self.callbacks = []
        # Maps each grouping predicate to its dedicated CountBucket.
        self.bucket_dict = {}
        # Accumulates per-grouping counts for the current polling round.
        self.reported_counts = {}
        # Predicates whose buckets were queried but have not yet reported.
        self.queried_preds = set([])
        # Guards queried_preds and reported_counts across the polling
        # thread and bucket-callback threads.
        self.queried_preds_lock = Lock()

    def set_up_polling(self, interval):
        """Setup polling of stats from switches every `interval` seconds.
        If interval is None, the application needs to call pull_stats
        directly."""
        if interval:
            self.qt = Thread(target=self.query_thread, args=(interval,))
            self.qt.daemon = True
            self.qt.start()

    def query_thread(self, interval):
        """Thread that calls pull_stats every `interval` seconds."""
        time.sleep(interval)
        while True:
            self.pull_stats()
            time.sleep(interval)

    def init_countbucket(self, pkt):
        """When a packet from a previously unseen grouping arrives, set up
        new count buckets for the same.
        """
        pred = self.groupby_filter.get_pred_from_pkt(pkt)
        cb = CountBucket()
        cb.register_callback(self.collect_pred(pred))
        self.bucket_policies.append(pred >> cb)
        self.bucket_dict[pred] = cb
        # Send the current packet to the new countbucket
        cb.eval(pkt)
        cb.apply()
        # In future, send all packets of this grouping directly to this bucket
        self.policy = ((self.groupby_filter >> self.fb) +
                       union(self.bucket_policies))

    def collect_pred(self, pred):
        """Return a callback function specific to each grouping predicate."""
        def collect(pkt_byte_counts):
            # Record the count under the predicate's field map and mark
            # this predicate as reported for the current round.
            with self.queried_preds_lock:
                if pred in self.queried_preds:
                    self.reported_counts[pred.map] = pkt_byte_counts
                    self.queried_preds.remove(pred)
                    self.call_callbacks()
        return collect

    def call_callbacks(self):
        """Check if all queried buckets have returned, and if so, call the
        callbacks.
        """
        if not self.queried_preds:
            for f in self.callbacks:
                f(self.reported_counts)

    def register_callback(self, fn):
        # fn receives the full reported_counts dict once each polling
        # round completes.
        self.callbacks.append(fn)

    def pull_stats(self):
        """Pulls statistics from the switches corresponding to all groupings."""
        buckets_list = []
        with self.queried_preds_lock:
            # Snapshot the groupings being queried in this round. Wrapping
            # the keys in list() keeps deepcopy working on Python 3, where
            # a dict_keys view cannot be deep-copied directly.
            self.queried_preds = set(copy.deepcopy(list(self.bucket_dict.keys())))
            self.reported_counts = {}
            for pred in self.queried_preds:
                buckets_list.append(self.bucket_dict[pred])
        # Calling pull_stats while holding the lock creates a potential deadlock
        for bucket in buckets_list:
            bucket.pull_stats()
        # call callbacks in case no preds were queried in the first place
        self.call_callbacks()

    def __repr__(self):
        return "counts\n%s" % repr(self.policy)