def delete(self, urns, client_cert, credentials, best_effort):
    """
    Deletes the requested resources, each identified by a URN.
    When best_effort is enabled, deletion is attempted on every resource.

    @param urns list of URNs with the identifiers of the resources
        to be treated
    @param client_cert client certificate (X509)
    @param credentials client credential(s), provided by the ClearingHouse
        and generated after the certificates
    @param best_effort flag describing the behaviour upon failure
        * best_effort = True: as many operations as possible are performed
          upon an error condition
        * best_effort = False: the set of operations is stopped as soon as
          an error occurs
    @return ro_slivers structure containing information on the slivers
        (URN, expiration date, etc.)
    """
    ro_slivers = []
    logger.info("best_effort=%s" % (best_effort,))

    route = db_sync_manager.get_slice_routing_keys(urns)
    logger.debug("Route=%s" % (route,))

    # Dispatch the delete operation to every peer that holds a part of
    # the requested slice
    for r, v in route.iteritems():
        peer = db_sync_manager.get_configured_peer_by_routing_key(r)
        logger.debug("peer=%s" % (peer,))
        if peer.get("type") in self._allowed_peers.values():
            slivers = CommonUtils.manage_delete(
                peer, v, credentials, best_effort)
            logger.debug("slivers=%s" % (slivers,))
            ro_slivers.extend(slivers)

    db_urns = []
    for s in ro_slivers:
        s["geni_expires"] = dates.rfc3339_to_datetime(s["geni_expires"])
        db_urns.append(s.get("geni_sliver_urn"))
    logger.debug("RO-Slivers(%d)=%s, DB-URNs(%d)=%s" %
                 (len(ro_slivers), ro_slivers, len(db_urns), db_urns))

    # Update the MS to stop the slice-monitoring collection
    slice_urn = db_sync_manager.get_slice_urn(urns)
    if slice_urn:
        try:
            # The MS needs to be sent the whole slice data in order to
            # delete it
            slice_monitor = SliceMonitoring()
            slice_monitor.delete_slice_topology(slice_urn)
        except Exception as e:
            logger.warning("Delegate could not send slice monitoring" +
                           " information upon Delete. Details: %s", (e,))
        db_sync_manager.delete_slice_sdn(slice_urn)

    db_sync_manager.delete_slice_urns(db_urns)
    self.__schedule_tnres_update(
        tn_resource_refresh, 1, "oneshot_tn_resource_refresh")
    return ro_slivers
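# --- Illustrative sketch (not part of the delegate) --------------------------
# __schedule_tnres_update() is defined elsewhere in this class and is not
# shown in this excerpt. The helper below is only a hedged guess at what a
# one-shot scheduler with the same call shape (callable, delay, job name)
# could look like, based here on threading.Timer; the real implementation may
# differ (e.g. it may rely on the RO's own job scheduler).
import threading


def _example_oneshot_schedule(callback, delay_seconds, name):
    """Run `callback` once, `delay_seconds` from now, on a named daemon thread."""
    timer = threading.Timer(delay_seconds, callback)
    timer.name = name
    timer.daemon = True
    timer.start()
    return timer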
def provision(self, urns, client_cert, credentials, best_effort, end_time,
              geni_users):
    """Documentation: see [geniv3rpc] GENIv3DelegateBase.
    {geni_users} is relevant here."""
    ro_manifest, ro_slivers = ROManifestFormatter(), []
    client_urn = CommonUtils.fetch_user_name_from_geni_users(geni_users)
    slice_urn = db_sync_manager.get_slice_urn(urns)

    slice_monitor = None
    try:
        slice_monitor = SliceMonitoring()
        slice_monitor.add_topology(slice_urn, SliceMonitoring.PROVISIONED,
                                   client_urn)
    except Exception as e:
        logger.warning("Delegate could not send Provision trigger" +
                       " to MS. Details: %s", (e,))

    route = db_sync_manager.get_slice_routing_keys(urns)
    logger.debug("Route=%s" % (route,))

    # Dispatch the provisioning to the proper handler for each peer type
    for r, v in route.iteritems():
        peer = db_sync_manager.get_configured_peer_by_routing_key(r)
        logger.debug("peer=%s" % (peer,))

        if peer.get("type") == self._allowed_peers.get("PEER_CRM"):
            com_m_info, com_slivers = COMUtils().manage_provision(
                peer, v, credentials, best_effort, end_time, geni_users)
            logger.debug("com_m=%s, com_s=%s" % (com_m_info, com_slivers,))

            for n in com_m_info.get("nodes"):
                ro_manifest.com_node(n)
            ro_slivers.extend(com_slivers)

            # introduce slice-monitoring info for C resources
            try:
                slice_monitor.add_c_resources(slice_urn,
                                              com_m_info.get("nodes"))
            except Exception as e:
                logger.warning("Delegate could not monitor COM resources" +
                               " upon Provision. Details: %s", (e,))

        elif peer.get("type") == self._allowed_peers.get("PEER_SDNRM"):
            of_m_info, of_slivers = SDNUtils().manage_provision(
                peer, v, credentials, best_effort, end_time, geni_users)
            logger.debug("of_m=%s, of_s=%s" % (of_m_info, of_slivers,))

            for s in of_m_info.get("slivers"):
                ro_manifest.of_sliver(s)
            ro_slivers.extend(of_slivers)

            # introduce slice-monitoring info for SDN resources
            try:
                slice_monitor.add_sdn_resources(slice_urn,
                                                of_m_info.get("slivers"))
            except Exception as e:
                logger.warning("Delegate could not monitor SDN resources" +
                               " upon Provision. Details: %s", (e,))

        elif peer.get("type") == self._allowed_peers.get("PEER_TNRM"):
            tn_m_info, tn_slivers = TNUtils().manage_provision(
                peer, v, credentials, best_effort, end_time, geni_users)
            logger.debug("tn_m=%s, tn_s=%s" % (tn_m_info, tn_slivers,))

            for n in tn_m_info.get("nodes"):
                ro_manifest.tn_node(n)
            for l in tn_m_info.get("links"):
                ro_manifest.tn_link(l)
            ro_slivers.extend(tn_slivers)

            # introduce slice-monitoring info for TN resources
            try:
                slice_monitor.add_tn_resources(slice_urn,
                                               tn_m_info.get("nodes"),
                                               tn_m_info.get("links"),
                                               peer)
            except Exception as e:
                logger.warning("Delegate could not monitor TN resources" +
                               " upon Provision. Details: %s", (e,))

        elif peer.get("type") == self._allowed_peers.get("PEER_SERM"):
            se_m_info, se_slivers = SEUtils().manage_provision(
                peer, v, credentials, best_effort, end_time, geni_users)
            logger.debug("se_m=%s, se_s=%s" % (se_m_info, se_slivers,))

            for n in se_m_info.get("nodes"):
                ro_manifest.se_node(n)
            for l in se_m_info.get("links"):
                ro_manifest.se_link(l)
            ro_slivers.extend(se_slivers)

            # introduce slice-monitoring info for SE resources
            try:
                slice_monitor.add_se_resources(slice_urn,
                                               se_m_info.get("nodes"),
                                               se_m_info.get("links"))
            except Exception as e:
                logger.warning("Delegate could not monitor SE resources" +
                               " upon Provision. Details: %s", (e,))

        elif peer.get("type") == self._allowed_peers.get("PEER_RO"):
            ro_m_info, ro_slivers_ro = ROUtils().manage_provision(
                peer, v, credentials, best_effort, end_time, geni_users)
            logger.debug("ro_m=%s, ro_s=%s" % (ro_m_info, ro_slivers_ro,))

            ro_manifest = ROUtils.generate_describe_manifest(
                ro_manifest, ro_m_info)
            ro_slivers.extend(ro_slivers_ro)

            # introduce slice-monitoring info for ALL the resource types!
            try:
                slice_monitor.add_c_resources(slice_urn,
                                              ro_m_info.get("com_nodes"))
                slice_monitor.add_sdn_resources(slice_urn,
                                                ro_m_info.get("sdn_slivers"))
                slice_monitor.add_tn_resources(slice_urn,
                                               ro_m_info.get("tn_nodes"),
                                               ro_m_info.get("tn_links"),
                                               peer)
                slice_monitor.add_se_resources(slice_urn,
                                               ro_m_info.get("se_nodes"),
                                               ro_m_info.get("se_links"))
            except Exception as e:
                logger.warning("Delegate could not monitor RO resources" +
                               " upon Provision. Details: %s", (e,))

    # send slice-monitoring info to the monitoring system
    try:
        # Before sending the slice info, we need to add some "virtual"
        # links (island-to-island)!
        slice_monitor.add_island_to_island_links(slice_urn)
        slice_monitor.send()
        # add the slice_monitoring object to the slice table
        db_sync_manager.store_slice_monitoring_info(
            slice_urn, slice_monitor.serialize())
    except Exception as e:
        logger.warning("Delegate could not send (store) slice monitoring" +
                       " information upon Provision. Details: %s", (e,))

    logger.debug("RO-ManifestFormatter=%s" % (ro_manifest,))

    # To prevent a data conversion error, the geni_expires parameter is
    # adjusted here: at least one sliver must carry a non-null value!
    valid_geni_expires = None
    for s in ro_slivers:
        if s["geni_expires"] is not None:
            valid_geni_expires = s["geni_expires"]
            break
    ro_slivers = CommonUtils.convert_sliver_dates_to_datetime(
        ro_slivers, valid_geni_expires)

    return ("%s" % ro_manifest, ro_slivers)