def rest_email_send(self):
    """REST POST /api/amlight/kytos_courier/email_send

    The following keys can be used in the JSON content. Each of them must be
    provided either in the JSON content or, as a fallback, in the settings.py
    file. Your mail server should be able to relay emails from the server
    where this application is running.

    # 'm_from' sender's email
    # 'm_to' destination's email
    # 'm_subject' email subject
    # 'm_body' email message body

    kwargs = {
        'm_from': '*****@*****.**',
        'm_to': '*****@*****.**',
        'm_subject': 'Notification subject',
        'm_body': 'Notification content'
    }
    """
    try:
        content = request.get_json()
        self._email_send(**content)
        return Response(response=json.dumps(content), status=200)
    except smtplib.SMTPException as e:
        log.error(str(e))
        return Response(str(e), status=400)

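# Illustrative client sketch for the endpoint above (not part of the NApp).
# It assumes the Kytos API is reachable at http://localhost:8181; adjust the
# host/port to your deployment. The addresses below are placeholders.
import requests

payload = {
    'm_from': 'noc@example.com',
    'm_to': 'oncall@example.com',
    'm_subject': 'Notification subject',
    'm_body': 'Notification content',
}
reply = requests.post(
    'http://localhost:8181/api/amlight/kytos_courier/email_send',
    json=payload,
)
print(reply.status_code, reply.text)
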
def load_evcs(self, event):
    """Try to load the unloaded EVCs from storehouse."""
    with self._lock:
        log.debug("Event load_evcs %s", event)
        circuits = self.storehouse.get_data()
        if not self._circuits_by_interface:
            self.load_circuits_by_interface(circuits)

        interface_id = '{}:{}'.format(event.content['switch'],
                                      event.content['port'])

        for circuit_id in self._circuits_by_interface.get(interface_id, []):
            if circuit_id in circuits and circuit_id not in self.circuits:
                try:
                    evc = self._evc_from_dict(circuits[circuit_id])
                except ValueError as exception:
                    log.error(f'Could not load EVC {circuit_id} '
                              f'because {exception}')
                    continue

                evc.deactivate()
                evc.current_path = Path([])
                evc.sync()
                self.circuits.setdefault(circuit_id, evc)
                self.sched.add(evc)

def _create_box_callback(self, _event, data, error):
    """Execute the callback to handle create_box."""
    if error:
        log.error(f'Can\'t create box with namespace {self.namespace}')
        # do not save invalid data nor log a misleading success message
        return

    self.box = data
    log.info(f'Box {self.box.box_id} was created in {self.namespace}.')

def _run_traces(self, trace_interval):
    """Kytos thread that keeps reading the self._request_queue queue
    looking for new traces to start.

    Args:
        trace_interval = sleeping time
    """
    while True:
        if self.number_pending_requests() > 0:
            try:
                r_ids = []
                for r_id in self._request_queue:
                    entries = self._request_queue[r_id]
                    new_thread(self._spawn_trace, (r_id, entries,))
                    r_ids.append(r_id)
                # After starting traces for new requests,
                # remove them from self._request_queue
                for rid in r_ids:
                    del self._request_queue[rid]
            except Exception as e:
                log.error("Trace Error: %s" % e)
        time.sleep(trace_interval)

def _get_box_callback(self, _event, data, error):
    """Handle the get_box method, saving the box or logging the error."""
    if error:
        log.error(f'Box {data.box_id} not found in {self.namespace}.')
        return

    self.box = data
    log.debug(f'Box {self.box.box_id} was loaded from storehouse.')

def remove_current_flows(self, current_path=None, force=True):
    """Remove all flows from the current path."""
    switches = set()
    switches.add(self.uni_a.interface.switch)
    switches.add(self.uni_z.interface.switch)
    if not current_path:
        current_path = self.current_path

    for link in current_path:
        switches.add(link.endpoint_a.switch)
        switches.add(link.endpoint_b.switch)

    match = {
        "cookie": self.get_cookie(),
        # match every cookie bit (0xFFFFFFFFFFFFFFFF)
        "cookie_mask": 18446744073709551615,
    }

    for switch in switches:
        try:
            self._send_flow_mods(switch, [match], 'delete', force=force)
        except FlowModException:
            log.error(f"Error removing flows from switch {switch.id} for "
                      f"EVC {self}")

    current_path.make_vlans_available()
    for link in current_path:
        notify_link_available_tags(self._controller, link)
    self.current_path = Path([])
    self.deactivate()
    self.sync()

def _create_box_callback(self, _event, data, error):
    """Execute the callback to handle create_box."""
    if error:
        log.error(f'Can\'t create persistence '
                  f'box with namespace {self.namespace}')
        return

    self.box = data

def _email_send(self, **kwargs):
    """Send an email through SMTP."""
    try:
        d = {
            'm_from': settings.m_from,
            'm_to': settings.m_to,
            'm_subject': settings.m_subject,
            'm_body': settings.m_body,
            'm_server_fqdn': settings.m_server_fqdn,
            'm_server_port': settings.m_server_port
        }
        # overrides global setting values
        for k, v in kwargs.items():
            d[k] = v

        msg = MIMEText(d['m_body'])
        msg['Subject'] = d['m_subject']
        msg['From'] = d['m_from']
        msg['To'] = d['m_to']

        s = smtplib.SMTP(host=d['m_server_fqdn'], port=d['m_server_port'])
        # m_to has to be a list in case of multiple destinations
        s.sendmail(d['m_from'], d['m_to'].split(", "), msg.as_string())
        s.quit()
        log.info('An email was sent to: {0}'.format(d['m_to']))
    except smtplib.SMTPException as e:
        log.error(str(e))
        raise

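# Usage sketch (illustrative only): because the kwargs are merged over the
# settings.py defaults one key at a time, a partial set of keys is enough as
# long as the remaining values exist in settings.py. 'napp' stands for a
# hypothetical instance of the class above; the values are placeholders.
# Note that 'm_to' is split on ", ", so multiple recipients are
# comma-plus-space separated.
napp._email_send(
    m_to='noc@example.com, oncall@example.com',
    m_subject='Link down',
    m_body='Backbone link sw1:eth1 <-> sw2:eth3 went down.',
)
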
def _create_box_callback(self, _event, data, error):
    """Execute the callback to handle create_box."""
    if error:
        log.error(f"Can't create box with namespace {self.namespace}")
        return

    self.box = data
    log.debug(f"Box {self.box.box_id} was created in {self.namespace}.")

def _save_status_callback(self, _event, data, error):
    """Display the saved network status in the log."""
    if error:
        log.error(f'Can\'t update persistence box {data.box_id}.')
        return

    log.info('Network administrative status saved in '
             f'{self.namespace}.{data.box_id}')

def event_flows_install_delete(self, event):
    """Install or delete flows in the switches through events.

    Install or delete flows of the switch identified by dpid.
    """
    try:
        dpid = event.content['dpid']
        flow_dict = event.content['flow_dict']
    except KeyError as error:
        log.error("Error getting fields to install or remove "
                  f"Flows: {error}")
        return

    if event.name == 'kytos.flow_manager.flows.install':
        command = 'add'
    elif event.name == 'kytos.flow_manager.flows.delete':
        command = 'delete'
    else:
        msg = f'Invalid event "{event.name}", should be install|delete'
        raise ValueError(msg)

    switch = self.controller.get_switch_by_dpid(dpid)
    try:
        self._install_flows(command, flow_dict, [switch])
    except InvalidCommandError as error:
        log.error("Error installing or deleting Flow through"
                  f" Kytos Event: {error}")

def execute(self):
    """This NApp is event-oriented."""
    # Token settings, auto shutdown.
    if self.has_failed:
        log.error("kytos courier will shut down! Fix the dependency first")
        self.controller.unload_napp(self.username, self.name)

def cancel_job(self, circuit_scheduler_id):
    """Cancel a specific job from the scheduler."""
    try:
        self.scheduler.remove_job(circuit_scheduler_id)
    except JobLookupError as job_error:
        # Job was not found... maybe someone already removed it.
        log.error("Scheduler error cancelling job. %s" % job_error)

def _query_assemble(clause, namespace, start, end, field=None,
                    method=None, group=None, fill=None):
    """Assemble the query string from the given clause and parameters."""
    if clause.upper() == 'SELECT':
        if field is None:
            clause += f' * FROM {namespace}'
        else:
            if method is None:
                clause += f" {field} FROM {namespace}"
            else:
                clause += f" {method}({field}) FROM {namespace}"
    elif clause.upper() == 'DELETE':
        clause += f' FROM {namespace}'
    else:
        log.error(f'Error. Invalid clause "{clause}".')

    time_clause = " WHERE time "
    if start is not None:
        clause += f"{time_clause} >'{str(start)}'"
        if end is not None:
            clause += f" AND time <'{str(end)}'"
    elif start is None and end is not None:
        clause += f"{time_clause} < '{str(end)}'"
    if group is not None:
        clause += f" GROUP BY time({group})"
    if fill is not None:
        clause += f" fill({fill})"
    return clause

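# Illustrative call (hypothetical namespace and field names). Modulo exact
# whitespace, the assembled string would be:
#   SELECT mean(bytes_in) FROM kronos.telemetry
#   WHERE time >'2020-01-01T00:00:00Z' AND time <'2020-01-02T00:00:00Z'
#   GROUP BY time(1h)
query = _query_assemble('SELECT', 'kronos.telemetry',
                        '2020-01-01T00:00:00Z', '2020-01-02T00:00:00Z',
                        field='bytes_in', method='mean', group='1h')
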
def _send(self, d):
    """Send this 'd' dict via slacker."""
    try:
        # skip sending if settings weren't properly set up in the first place
        if not self.has_failed:
            if not d.get('payload'):
                raise ValueError("The dictionary should contain at least "
                                 "the 'payload' key-value")
            slack_msg = ""
            for k in ['source', 'tag', 'payload']:
                slack_msg = self._parse_str(slack_msg, d.get(k))
            # fallback to #general
            ch = d.get('channel') if d.get('channel') else 'general'
            log.info('channel:{0} msg:{1}'.format(ch, slack_msg))
            self.slack.chat.post_message(ch, slack_msg)
    except requests.exceptions.ConnectionError as e:
        err = ("ConnectionError to Slack API. Make sure you are connected "
               "and can reach Slack API.")
        log.error(err)
        raise ValueError(err)
    except self.error as e:
        err = "This slack channel {0} doesn't exist".format(ch)
        log.error(err)
        raise ValueError(err)

def iso_format_validation(timestamp):
    """Verify if a timestamp is in isoformat. If it's not, try to convert it."""
    if timestamp is None:
        return timestamp

    if not isinstance(timestamp, str):
        timestamp = str(timestamp)

    first_part = "(?:[1-9][0-9]*)?[0-9]{4})-(1[0-2]|0[1-9])-"
    second_part = "(3[01]|0[1-9]|[12][0-9])T(2[0-3]|[01][0-9]):([0-5][0-9]):"
    third_part = "([0-5][0-9])(\\.[0-9]+)?(Z|[+-](?:2[0-3]|[01][0-9]):[0-5]"\
                 "[0-9])"
    regex = first_part + second_part + third_part
    regex_iso = r'^(-?' + regex + '?$'
    regex_date = r'^([12]\d{3}-(0[1-9]|1[0-2])-(0[1-9]|[12]\d|3[01]))'
    match_iso = re.compile(regex_iso).match
    match_date = re.compile(regex_date).match

    if match_iso(timestamp) is None and match_date(timestamp) is None:
        try:
            timestamp = float(timestamp)
            iso = '%Y-%m-%dT%H:%M:%SZ'
            timestamp = datetime.utcfromtimestamp(timestamp).strftime(iso)
        except ValueError:
            log.error("Error. Timestamp is not in ISO-8601 format.")
            return 400
    return timestamp

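# Illustrative behaviour of the validator above: a POSIX timestamp given as
# a number (or numeric string) is converted to 'YYYY-MM-DDTHH:MM:SSZ', an
# already ISO/date-like string is returned unchanged, and anything else
# yields 400.
iso_format_validation(1577836800)    # -> '2020-01-01T00:00:00Z'
iso_format_validation('2020-01-01')  # date-like -> returned as-is
iso_format_validation('not-a-date')  # -> 400
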
def update_instance(self, event, data, error):
    """Display in Kytos console if the data was updated."""
    entities = event.content.get('namespace', '').split('.')[-2]
    if error:
        log.error(f'Error trying to update storehouse {entities}.')
    else:
        log.debug(f'Storehouse update to entities: {entities}.')

def _run_traces(self, trace_interval):
    """Thread that will keep reading the self._request_queue queue
    looking for new trace requests to run.

    Args:
        trace_interval = sleeping time
    """
    while True:
        if self.number_pending_requests() > 0:
            try:
                new_request_ids = []
                for req_id in self._request_queue:
                    if not self.limit_traces_reached():
                        entries = self._request_queue[req_id]
                        self._running_traces[req_id] = entries
                        new_thread(self._spawn_trace, (req_id, entries,))
                        new_request_ids.append(req_id)
                    else:
                        break
                # After starting traces for new requests,
                # remove them from self._request_queue
                for rid in new_request_ids:
                    del self._request_queue[rid]
            except Exception as error:  # pylint: disable=broad-except
                log.error("Trace Error: %s" % error)
        time.sleep(trace_interval)

def _activate_host_evc(self, dpid, cvlan) -> Response:
    """Activate the host EVPL cvlan.

    :dpid: Switch dpid
    :cvlan: customer vlan (int)
    """
    fmods = []
    # untagged: push the customer vlan towards the backbone NNI
    fmod = self.prepare_flow_mod(
        in_interface=self.edge_host_ofnum,
        out_interface=self.bb_edge_nni_ofnum,
        push=True,
        out_vlan=cvlan,
    )
    fmods.append(fmod)
    # pop the customer vlan on the way back to the host
    fmod_opposite = self.prepare_flow_mod(
        in_interface=self.bb_edge_nni_ofnum,
        out_interface=self.edge_host_ofnum,
        in_vlan=cvlan,
        pop=True,
    )
    fmods.append(fmod_opposite)
    response = self.send_flow_mods(dpid, fmods)
    if response.status_code != 200:
        log.error("Response {}".format(response.text))
    return response

def status(self):
    """Check for the status of a path.

    If any link in this path is down, the path is considered down.
    """
    if not self:
        return EntityStatus.DISABLED

    endpoint = '%s/%s' % (settings.TOPOLOGY_URL, 'links')
    api_reply = requests.get(endpoint)
    if api_reply.status_code != getattr(requests.codes, 'ok'):
        log.error('Failed to get links at %s. Returned %s',
                  endpoint, api_reply.status_code)
        return None

    links = api_reply.json()['links']
    return_status = EntityStatus.UP
    for path_link in self:
        try:
            link = links[path_link.id]
        except KeyError:
            return EntityStatus.DISABLED
        if link['enabled'] is False:
            return EntityStatus.DISABLED
        if link['active'] is False:
            return_status = EntityStatus.DOWN
    return return_status

def _provision_host_bb_evc(self, dpid: str, path: int = 1) -> Response:
    """Provision the Backbone Ethernet Virtual Circuit of the Host on this dpid.

    :dpid: Switch dpid
    :path: int 1, 2, or 3
    """
    fmods = []
    vlan = self.bb_host_vlan
    # tagged
    fmod = self.prepare_flow_mod(
        in_interface=self.bb_edge_uni_ofnum,
        out_interface=int(path) + self.bb_edge_uni_ofnum,
        in_vlan=vlan,
        out_vlan=vlan,
    )
    fmods.append(fmod)
    # tagged
    fmod_opposite = self.prepare_flow_mod(
        in_interface=int(path) + self.bb_edge_uni_ofnum,
        out_interface=self.bb_edge_uni_ofnum,
        in_vlan=vlan,
        out_vlan=vlan,
    )
    fmods.append(fmod_opposite)
    response = self.send_flow_mods(dpid, fmods)
    if response.status_code != 200:
        log.error("Response {}".format(response.text))
    return response

def _provision_bb_evcs(self, dpid: str) -> Response:
    """Provision Backbone Ethernet Virtual Circuits for this dpid.

    :dpid: Switch dpid
    """
    fmods = []
    for nni, vlan in zip(self.bb_sws["nni_ofnums"],
                         self.bb_sws["nni_vlans"]):
        # tagged
        fmod = self.prepare_flow_mod(
            in_interface=self.bb_sws["uni_ofnum"],
            out_interface=nni,
            in_vlan=vlan,
            out_vlan=vlan,
        )
        fmods.append(fmod)
        # tagged
        fmod_opposite = self.prepare_flow_mod(
            in_interface=nni,
            out_interface=self.bb_sws["uni_ofnum"],
            in_vlan=vlan,
            out_vlan=vlan,
        )
        fmods.append(fmod_opposite)
    response = self.send_flow_mods(dpid, fmods)
    if response.status_code != 200:
        log.error("Response {}".format(response.text))
    return response

def _run_traces(self, trace_interval):
    """Thread that will keep reading the self._request_queue queue
    looking for new trace requests to run.

    Args:
        trace_interval = sleeping time
    """
    while True:
        if self.number_pending_requests() > 0:
            try:
                new_request_ids = []
                for req_id in self._request_queue:
                    if not self.limit_traces_reached():
                        entries = self._request_queue[req_id]
                        self._running_traces[req_id] = entries
                        new_thread(self._spawn_trace, (req_id, entries,))
                        new_request_ids.append(req_id)
                    else:
                        break
                # After starting traces for new requests,
                # remove them from self._request_queue
                for rid in new_request_ids:
                    del self._request_queue[rid]
            except Exception as error:  # pylint: disable=broad-except
                log.error("Trace Error: %s" % error)
        time.sleep(trace_interval)

def handle_port_deleted(self, event):
    """Delete a port from a switch.

    It also does the necessary cleanup on the topology.
    """
    # Get Switch
    device = self.topology.get_device(event.content['switch'])
    if device is None:
        log.error('Device %s not found.', event.content['switch'])
        return

    # Get Switch Port
    port = device.get_port(event.content['port'])
    if port is None:
        msg = 'Port %s not found on switch %s. Nothing to delete.'
        log.error(msg, event.content['port'], device.id_)
        return

    # Create the interface object
    interface = Interface(device, port)

    # Get Link from Interface
    link = self.topology.get_link(interface)

    # Destroy the link
    self.topology.unset_link(link)

    # Remove the port
    device.remove_port(port)

def update_colors(self, links):
    """Color each switch, with the color based on the switch's DPID.

    After that, for each switch, install flows matching the color of its
    neighbors (if not yet installed), to send probe packets to the
    controller.
    """
    url = settings.FLOW_MANAGER_URL

    for switch in self.controller.switches.values():
        if switch.dpid not in self.switches:
            color = int(switch.dpid.replace(':', '')[4:], 16)
            self.switches[switch.dpid] = {'color': color,
                                          'neighbors': set(),
                                          'flows': {}}
        else:
            self.switches[switch.dpid]['neighbors'] = set()

    for link in links:
        source = link['endpoint_a']['switch']
        target = link['endpoint_b']['switch']
        if source != target:
            self.switches[source]['neighbors'].add(target)
            self.switches[target]['neighbors'].add(source)

    # Create the flows for each neighbor of each switch and install them
    # if not already installed
    for dpid, switch_dict in self.switches.items():
        switch = self.controller.get_switch_by_dpid(dpid)
        if switch.ofp_version == '0x01':
            controller_port = Port.OFPP_CONTROLLER
        elif switch.ofp_version == '0x04':
            controller_port = PortNo.OFPP_CONTROLLER
        else:
            continue
        for neighbor in switch_dict['neighbors']:
            if neighbor not in switch_dict['flows']:
                flow_dict = {
                    'table_id': 0,
                    'match': {},
                    'priority': 50000,
                    'actions': [
                        {'action_type': 'output', 'port': controller_port}
                    ]}
                flow_dict['match'][settings.COLOR_FIELD] = \
                    self.color_to_field(
                        self.switches[neighbor]['color'],
                        settings.COLOR_FIELD
                    )
                switch_dict['flows'][neighbor] = flow_dict
                returned = requests.post(
                    url % dpid,
                    json={'flows': [flow_dict]}
                )
                if returned.status_code // 100 != 2:
                    log.error('Flow manager returned an error inserting '
                              'flow. Status code %s'
                              % (returned.status_code,))

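# Worked example of the color derivation above, with a hypothetical dpid:
# the colons are stripped, the first four hex digits are dropped, and the
# remainder is parsed as a hexadecimal number.
dpid = '00:00:00:00:00:00:00:03'
color = int(dpid.replace(':', '')[4:], 16)  # '000000000003' -> 3
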
def load_from_store(self, event, box, error):
    """Save the data retrieved from storehouse."""
    entities = event.content.get('namespace', '').split('.')[-2]
    if error:
        log.error('Error while getting a box from storehouse.')
    else:
        self.store_items[entities] = box
        log.debug('Data updated')

def _save_evc_callback(self, _event, data, error):
    """Display the save EVC result in the log."""
    self._lock.release()
    log.debug(f'Lock {self._lock} released.')
    if error:
        log.error(f'Can\'t update the {self.box.box_id}')
        return

    log.info(f'Box {data.box_id} was updated.')

def remove_circuit(circuit_id):
    """Delete a circuit from the circuits path."""
    path = os.path.join(settings.CIRCUITS_PATH, circuit_id)
    if not os.access(path, os.W_OK):
        log.error("Could not delete circuit from %s", path)
        return None
    os.remove(path)

def deploy_to_path(self, path=None):
    """Install the flows for this circuit.

    Procedures to deploy:
    0. Remove current flows installed
    1. Decide whether to deploy "path" or discover a new path
    2. Choose vlan
    3. Install NNI flows
    4. Install UNI flows
    5. Activate
    6. Update current_path
    7. Update links caches (primary, current, backup)
    """
    self.remove_current_flows()
    use_path = path
    if self.should_deploy(use_path):
        try:
            use_path.choose_vlans()
            for link in use_path:
                notify_link_available_tags(self._controller, link)
        except KytosNoTagAvailableError:
            use_path = None
    else:
        for use_path in self.discover_new_paths():
            if use_path is None:
                continue
            try:
                use_path.choose_vlans()
                for link in use_path:
                    notify_link_available_tags(self._controller, link)
                break
            except KytosNoTagAvailableError:
                pass
        else:
            use_path = None

    try:
        if use_path:
            self._install_nni_flows(use_path)
            self._install_uni_flows(use_path)
        elif self.uni_a.interface.switch == self.uni_z.interface.switch:
            use_path = Path()
            self._install_direct_uni_flows()
        else:
            log.warning(f"{self} was not deployed. "
                        "No available path was found.")
            return False
    except FlowModException:
        log.error(f"Error deploying EVC {self} when calling flow_manager.")
        self.remove_current_flows(use_path)
        return False

    self.activate()
    self.current_path = use_path
    self.sync()
    log.info(f"{self} was deployed.")
    return True

def validate_timestamp(start, end):
    """Validate the timestamps, avoiding an end smaller than the start."""
    if start is not None and end is not None:
        start, end = str(start), str(end)
        if start > end:
            log.error("Invalid Data Range: {}, {}".format(start, end))
            return 400

def remove_job(self, event):
    """Remove a job from this scheduler."""
    try:
        job_id = event.content['id']
    except KeyError:
        log.error('Scheduled job must have an id')
        return
    self._scheduler.remove_job(job_id)

def _get_colors(self):
    """Get list of colors."""
    try:
        result = requests.get(url=self._url)
        if result.status_code == 200:
            result = json.loads(result.content)
            self._colors = result['colors']
        else:
            raise Exception
    except Exception as err:  # pylint: disable=broad-except
        log.error('Error: Can not connect to Kytos/Coloring: %s' % err)

def process_packet_in(event):
    """Process a PacketIn event and return the parsed frame data.

    Args:
        event: Kytos event carrying the PacketIn message
    Return:
        ethernet: frame
        in_port: incoming port
        switch: incoming switch
    """
    of_version = event.content['message'].header.version
    if of_version.value == 1:
        return openflow10.packet_in(event, event.content['message'])
    elif of_version.value == 4:
        return openflow13.packet_in(event, event.content['message'])

    log.error("Invalid OpenFlow version")
    return 0, 0, 0

def send_packet_out(controller, switch, port, data):
    """Prepare and send a PacketOut used by the Tracer.

    Args:
        controller: Kytos controller
        switch: OpenFlow datapath
        port: in_port
        data: Ethernet frame
    """
    of_version = switch.features.header.version
    if of_version.value == 1:
        openflow10.send_packet_out(controller, switch, port, data)
    elif of_version.value == 4:
        openflow13.send_packet_out(controller, switch, port, data)
    else:
        log.error("Invalid OpenFlow version")
        return