def group_delete(self, group_mod):
    """Handle an OpenFlow group-delete: drop one group, or all of them.

    Deleting a specific group also removes any flows that reference it
    (via flows_delete_by_group_id). Deleting an unknown group id is a
    no-op, as required by the OpenFlow spec.
    """
    assert isinstance(group_mod, ofp.ofp_group_mod)

    known_groups = OrderedDict(
        (g.desc.group_id, g) for g in self.groups_proxy.get('/').items)
    dirty_groups = False
    dirty_flows = False
    target_id = group_mod.group_id

    if target_id == ofp.OFPG_ALL:
        # TODO we must delete all flows that point to this group and
        # signal controller as requested by flow's flag
        known_groups = OrderedDict()
        dirty_groups = True
        self.log.debug('all-groups-deleted')
    elif target_id in known_groups:
        flows = list(self.flows_proxy.get('/').items)
        dirty_flows, flows = self.flows_delete_by_group_id(
            flows, target_id)
        del known_groups[target_id]
        dirty_groups = True
        self.log.debug('group-deleted', group_id=target_id)
    # else: unknown group id -- per openflow spec, this is not an error

    if dirty_groups:
        self.groups_proxy.update(
            '/', FlowGroups(items=known_groups.values()))
    if dirty_flows:
        self.flows_proxy.update('/', Flows(items=flows))
def test_add_flow(self):
    """A single flow-mod must result in exactly one stored flow entry."""
    mod = mk_simple_flow_mod(match_fields=[], actions=[])
    self.lda.update_flow_table(mod)
    expected = Flows(
        items=[flow_stats_entry_from_flow_mod_message(mod)])
    self.assertFlowsEqual(self.flows, expected)
def ListDeviceFlows(self, request, context):
    """gRPC handler: return the flow table of the device named in request.id.

    Returns an empty Flows message (with an error status on the context)
    when the id is malformed or the device does not exist.
    """
    log.info('grpc-request', request=request)

    # A '/' inside the id would corrupt the model path built below.
    if '/' in request.id:
        context.set_details('Malformed device id \'{}\''.format(
            request.id))
        context.set_code(StatusCode.INVALID_ARGUMENT)
        return Flows()

    try:
        return self.root.get('/devices/{}/flows'.format(request.id))
    except KeyError:
        context.set_details('Device \'{}\' not found'.format(request.id))
        context.set_code(StatusCode.NOT_FOUND)
        return Flows()
def flow_delete(self, mod):
    """Non-strict flow delete: remove every flow matching *mod*.

    Writes the surviving flows back to the model only when something was
    actually removed, and (for flow-mod messages) announces the deleted
    flows to the controller as OpenFlow requires.
    """
    assert isinstance(mod, (ofp.ofp_flow_mod, ofp.ofp_flow_stats))

    # read from model
    current = list(self.flows_proxy.get('/').items)

    # single pass: partition into survivors and victims
    survivors = []
    victims = []
    for entry in current:
        bucket = victims if self.flow_matches_spec(entry, mod) else survivors
        bucket.append(entry)

    # write back only if the table actually shrank
    if victims:
        self.flows_proxy.update('/', Flows(items=survivors))
        self.log.debug("flow deleted", mod=mod)

    # from mod send announcement
    if isinstance(mod, ofp.ofp_flow_mod):
        # send notifications for discarded flow as required by OpenFlow
        self.announce_flows_deleted(victims)
def _delete_all_flows(self):
    """ Delete all flows on the device

    Clears both the flow table and the flow-group table by writing empty
    containers back through the proxies. Any failure is logged rather than
    propagated (best-effort cleanup).
    """
    try:
        self.flows_proxy.update('/', Flows(items=[]))
        self.groups_proxy.update('/', FlowGroups(items=[]))
    except Exception as e:
        # Fixed: `except Exception, e` is Python-2-only syntax (a hard
        # syntax error on Python 3); `as e` works on both and matches the
        # style used elsewhere in this file.
        # NOTE(review): other handlers here log via self.log.* -- confirm
        # self.exception is a valid logger method on this class.
        self.exception('flow-delete-exception', e=e)
def update_children_flows(self, device_rules_map):
    """Push decomposed flow/group rules down to each child device.

    The entry keyed by our own device_id is skipped; only children are
    written to via the root proxy.
    """
    for child_id, (flows, groups) in device_rules_map.iteritems():
        if child_id == self.device_id:
            continue  # never write our own device's tables here
        self.root_proxy.update(
            '/devices/{}/flows'.format(child_id),
            Flows(items=flows.values()))
        self.root_proxy.update(
            '/devices/{}/flow_groups'.format(child_id),
            FlowGroups(items=groups.values()))
def _flow_table_updated(self, flows):
    """Callback fired when the logical flow table changes.

    Decomposes the logical flows into per-device rules and either hands
    them directly to the device adapter (openolt bypass) or writes them
    into each device's flows/flow_groups model paths.
    """
    self.log.debug('flow-table-updated',
                   logical_device_id=self.logical_device_id, flows=flows)

    if self._no_flow_changes_required:
        # Stats changes, no need to process further
        self.log.debug('flow-stats-update')
    else:
        groups = self.groups_proxy.get('/').items
        device_rules_map = self.decompose_rules(flows.items, groups)

        # TODO we have to evolve this into a policy-based, event based pattern
        # This is a raw implementation of the specific use-case with certain
        # built-in assumptions, and not yet device vendor specific. The policy-
        # based refinement will be introduced that later.

        # Temporary bypass for openolt
        if self.accepts_direct_logical_flows:
            # give the logical flows directly to the adapter
            self.log.debug('it is an direct logical flow bypass')
            if self.device_adapter_agent is None:
                self.log.error('No device adapter agent',
                               device_id=self.device_id,
                               logical_device_id=self.logical_device_id)
                return

            # keep only the flows that were flagged for addition
            # (presumably populated earlier in the update cycle -- TODO confirm)
            flows_to_add = []
            for f in flows.items:
                if f.id in self._flows_ids_to_add:
                    flows_to_add.append(f)

            self.log.debug('flows to remove',
                           flows_to_remove=self._flows_to_remove,
                           flows_ids=self._flows_ids_to_remove)
            try:
                self.device_adapter_agent.update_logical_flows(
                    self.device_id, flows_to_add, self._flows_to_remove,
                    groups, device_rules_map)
            except Exception as e:
                # best-effort: log and continue rather than break the update
                self.log.error('logical flows bypass error', error=e,
                               flows=flows)
        else:
            # standard path: write decomposed rules into each device's model
            for device_id, (flows, groups) in device_rules_map.iteritems():
                self.root_proxy.update(
                    '/devices/{}/flows'.format(device_id),
                    Flows(items=flows.values()))
                self.root_proxy.update(
                    '/devices/{}/flow_groups'.format(device_id),
                    FlowGroups(items=groups.values()))
def test_add_different_flows(self):
    """Two distinct flow-mods must both end up in the flow table."""
    mods = [
        mk_simple_flow_mod(match_fields=[in_port(1)], actions=[]),
        mk_simple_flow_mod(match_fields=[in_port(2)], actions=[]),
    ]
    for m in mods:
        self.lda.update_flow_table(m)
    expected = Flows(items=[
        flow_stats_entry_from_flow_mod_message(m) for m in mods
    ])
    self.assertFlowsEqual(self.flows, expected)
def _group_table_updated(self, flow_groups):
    """Callback fired when the group table changes.

    Re-decomposes the logical flows against the new groups and pushes the
    per-device flows/flow_groups into the model.
    """
    self.log.debug('group-table-updated',
                   logical_device_id=self.logical_device_id,
                   flow_groups=flow_groups)

    logical_flows = self.flows_proxy.get('/').items
    per_device = self.decompose_rules(logical_flows, flow_groups.items)
    for dev_id, (dev_flows, dev_groups) in per_device.iteritems():
        self.root_proxy.update('/devices/{}/flows'.format(dev_id),
                               Flows(items=dev_flows.values()))
        self.root_proxy.update('/devices/{}/flow_groups'.format(dev_id),
                               FlowGroups(items=dev_groups.values()))
def remove_flow(self, flow):
    """Remove the device flows that realize a single logical flow.

    Device flows are matched by cookie == logical flow id, removed on the
    OLT via gRPC (NOT_FOUND is tolerated, e.g. after an OLT reboot), their
    ids released from the resource manager, and the survivors written back
    to the data store.
    """
    self.log.debug('trying to remove flows from logical flow :',
                   logical_flow=flow)

    # collect the device flows that belong to this logical flow
    device_flows_to_remove = []
    device_flows = self.flows_proxy.get('/').items
    for f in device_flows:
        if f.cookie == flow.id:
            device_flows_to_remove.append(f)

    for f in device_flows_to_remove:
        # stored id encodes both the device flow id and its direction
        (id, direction) = self.decode_stored_id(f.id)
        flow_to_remove = openolt_pb2.Flow(flow_id=id, flow_type=direction)
        try:
            self.stub.FlowRemove(flow_to_remove)
        except grpc.RpcError as grpc_e:
            if grpc_e.code() == grpc.StatusCode.NOT_FOUND:
                self.log.debug('This flow does not exist on the switch, '
                               'normal after an OLT reboot',
                               flow=flow_to_remove)
            else:
                raise grpc_e

        # once we have successfully deleted the flow on the device
        # release the flow_id on resource pool and also clear any
        # data associated with the flow_id on KV store.
        self._clear_flow_id_from_rm(f, id, direction)
        self.log.debug('flow removed from device', flow=f,
                       flow_key=flow_to_remove)

    if len(device_flows_to_remove) > 0:
        # rebuild the stored flow table without the removed entries
        new_flows = []
        flows_ids_to_remove = [f.id for f in device_flows_to_remove]
        for f in device_flows:
            if f.id not in flows_ids_to_remove:
                new_flows.append(f)
        self.flows_proxy.update('/', Flows(items=new_flows))
        self.log.debug('flows removed from the data store',
                       flow_ids_removed=flows_ids_to_remove,
                       number_of_flows_removed=(len(device_flows) - len(
                           new_flows)),
                       expected_flows_removed=len(device_flows_to_remove))
    else:
        self.log.debug('no device flow to remove for this flow (normal '
                       'for multi table flows)', flow=flow)
def _flow_table_updated(self, flows):
    """Callback fired when the logical flow table changes.

    Decomposes the logical flows against the current groups and writes
    the resulting per-device rules into each device's model paths.
    """
    self.log.debug('flow-table-updated',
                   logical_device_id=self.logical_device_id, flows=flows)

    # TODO we have to evolve this into a policy-based, event based pattern
    # This is a raw implementation of the specific use-case with certain
    # built-in assumptions, and not yet device vendor specific. The policy-
    # based refinement will be introduced that later.
    group_items = self.groups_proxy.get('/').items
    rules_by_device = self.decompose_rules(flows.items, group_items)
    for dev_id, (dev_flows, dev_groups) in rules_by_device.iteritems():
        self.root_proxy.update('/devices/{}/flows'.format(dev_id),
                               Flows(items=dev_flows.values()))
        self.root_proxy.update('/devices/{}/flow_groups'.format(dev_id),
                               FlowGroups(items=dev_groups.values()))
def flow_delete_strict(self, mod):
    """Strict flow delete: remove exactly the flow described by *mod*.

    If no identical flow is stored, a warning is logged and the table is
    left untouched.
    """
    assert isinstance(mod, ofp.ofp_flow_mod)

    # read from model
    table = list(self.flows_proxy.get('/').items)

    target = flow_stats_entry_from_flow_mod_message(mod)
    pos = self.find_flow(table, target)
    if pos < 0:
        # TODO need to check what to do with this case
        self.log.warn('flow-cannot-delete', flow=target)
        return

    del table[pos]
    self.flows_proxy.update('/', Flows(items=table))
def flow_add(self, mod):
    """Handle an OFPFC_ADD flow-mod.

    With OFPFF_CHECK_OVERLAP set, an overlapping flow triggers an
    OFPFMFC_OVERLAP error instead of an add. Otherwise an identical
    existing flow is replaced in place (preserving its counters unless
    OFPFF_RESET_COUNTS is set), and a new flow is appended.
    """
    assert isinstance(mod, ofp.ofp_flow_mod)
    # cookie masks are not supported by this implementation
    assert mod.cookie_mask == 0

    # read from model
    flows = list(self.flows_proxy.get('/').items)

    flow = flow_stats_entry_from_flow_mod_message(mod)
    changed = updated = False
    check_overlap = mod.flags & ofp.OFPFF_CHECK_OVERLAP
    if check_overlap:
        if self.find_overlapping_flows(flows, mod, True):
            # controller asked us to refuse overlapping flows
            self.signal_flow_mod_error(
                ofp.OFPFMFC_OVERLAP, mod)
        else:
            # free to add as new flow
            flows.append(flow)
            changed = True
            self.log.debug('flow-added', flow=mod)
    else:
        idx = self.find_flow(flows, flow)
        if idx >= 0:
            old_flow = flows[idx]
            if not (mod.flags & ofp.OFPFF_RESET_COUNTS):
                # carry the existing counters over to the replacement
                flow.byte_count = old_flow.byte_count
                flow.packet_count = old_flow.packet_count
            flows[idx] = flow
            changed = updated = True
            self.log.debug('flow-updated')
        else:
            flows.append(flow)
            changed = True
            self.log.debug('flow-added')

    # write back to model
    if changed:
        self.flows_proxy.update('/', Flows(items=flows))

    # only freshly-added flows contribute to meter stats here
    if not updated:
        self.update_flow_count_of_meter_stats(mod, flow)
def setUp(self):
    """Build an in-memory OLT/ONU fixture and wire mock proxies to it.

    Creates one OLT with two ONUs, then replaces the core/root/ld/flows/
    groups proxies with closures that read and write the plain attributes
    on this test case, so LogicalDeviceAgent runs without a real model.
    """
    self.setup_mock_registry()

    # state the mocked proxies read/write
    self.flows = Flows(items=[])
    self.groups = FlowGroups(items=[])

    # logical ports: port 0 = OLT NNI (root), ports 1-2 = ONU UNIs
    self.ld_ports = [
        LogicalPort(id='0', device_id='olt', device_port_no=0,
                    root_port=True, ofp_port=ofp.ofp_port(port_no=0)),
        LogicalPort(id='1', device_id='onu1', device_port_no=0,
                    ofp_port=ofp.ofp_port(port_no=1)),
        LogicalPort(id='2', device_id='onu2', device_port_no=0,
                    ofp_port=ofp.ofp_port(port_no=2))
    ]
    self.devices = {
        'olt': Device(id='olt', root=True, parent_id='id'),
        'onu1': Device(id='onu1', parent_id='olt', parent_port_no=1,
                       vlan=101),
        'onu2': Device(id='onu2', parent_id='olt', parent_port_no=1,
                       vlan=102),
    }
    # physical port topology: OLT PON port 1 peers with both ONUs
    self.ports = {
        'olt': [
            Port(port_no=0, type=Port.ETHERNET_NNI, device_id='olt'),
            Port(port_no=1, type=Port.PON_OLT, device_id='olt', peers=[
                Port.PeerPort(device_id='onu1', port_no=1),
                Port.PeerPort(device_id='onu2', port_no=1)
            ])
        ],
        'onu1': [
            Port(port_no=0, type=Port.ETHERNET_UNI, device_id='onu1'),
            Port(port_no=1, type=Port.PON_ONU, device_id='onu1', peers=[
                Port.PeerPort(device_id='olt', port_no=1),
            ])
        ],
        'onu2': [
            Port(port_no=0, type=Port.ETHERNET_UNI, device_id='onu2'),
            Port(port_no=1, type=Port.PON_ONU, device_id='onu2', peers=[
                Port.PeerPort(device_id='olt', port_no=1),
            ])
        ],
    }
    # per-device tables populated by update_devices() below
    self.device_flows = {'olt': Flows(), 'onu1': Flows(), 'onu2': Flows()}
    self.device_groups = {
        'olt': FlowGroups(),
        'onu1': FlowGroups(),
        'onu2': FlowGroups()
    }
    self.ld = LogicalDevice(id='id', root_device_id='olt')

    self.root_proxy = Mock()

    def get_devices(path):
        # path is the remainder after the '/devices/' prefix
        if path == '':
            return self.devices.values()
        if path.endswith('/ports'):
            return self.ports[path[:-len('/ports')]]
        elif path.find('/') == -1:
            return self.devices[path]
        else:
            raise Exception(
                'Nothing to yield for path /devices/{}'.format(path))

    def update_devices(path, data):
        # capture flow/group writes into the per-device dicts above
        if path.endswith('/flows'):
            self.device_flows[path[:-len('/flows')]] = data
        elif path.endswith('/flow_groups'):
            self.device_groups[path[:-len('/flow_groups')]] = data
        else:
            raise NotImplementedError(
                'not handling path /devices/{}'.format(path))

    self.root_proxy.get = lambda p: \
        get_devices(p[len('/devices/'):]) if p.startswith('/devices') \
        else None
    self.root_proxy.update = lambda p, d: \
        update_devices(p[len('/devices/'):], d) \
        if p.startswith('/devices') \
        else None

    self.ld_proxy = Mock()
    self.ld_proxy.get = lambda p: \
        self.ld_ports if p == '/ports' else (
            self.ld if p == '/' else None
        )

    self.flows_proxy = Mock()
    self.flows_proxy.get = lambda _: self.flows  # always '/' path

    def update_flows(_, flows):  # always '/' path
        self.flows = flows

    self.flows_proxy.update = update_flows

    self.groups_proxy = Mock()
    self.groups_proxy.get = lambda _: self.groups  # always '/' path

    def update_groups(_, groups):  # always '/' path
        self.groups = groups

    self.groups_proxy.update = update_groups

    self.core = Mock()
    # route proxy requests by path suffix to the mocks built above
    self.core.get_proxy = lambda path: \
        self.root_proxy if path == '/' else (
            self.ld_proxy if path.endswith('id') else (
                self.flows_proxy if path.endswith('flows')
                else self.groups_proxy
            )
        )

    self.lda = LogicalDeviceAgent(self.core, self.ld)
def reset_flows(self):
    """Replace the stored flow table with an empty Flows container."""
    empty_table = Flows()
    self.flows_proxy.update('/', empty_table)