def dump(self):
    """
    (Debug function). Dump the ExploreTask embeded in this Stack using
    the logger, one line per priority level.
    """
    # Fixed priority order: highest-priority task lists are dumped first.
    priorities = (TASK_11, TASK_1Nsq, TASK_1N)
    for prio in priorities:
        Log.tmp("PRIO %d : %r" % (prio, self.tasks[prio]))
def manifold_to_sfa_leases(cls, leases, slice_urn):
    """
    Convert Manifold lease dictionaries into SFA lease dictionaries.

    Args:
        leases: iterable of dicts; each must carry 'resource',
            'start_time' (unix timestamp) and either 'end_time'
            (unix timestamp) or 'duration'.
        slice_urn (string): urn of the slice the leases belong to.

    Returns:
        list of dicts with 'component_id', 'slice_id', 'start_time',
        'end_time' (both formatted as UTC 'YYYY-MM-DD hh:mm:ss') and
        'duration'.

    Raises:
        Exception: if a lease has neither 'end_time' nor 'duration',
            or if the computed duration is below the minimum.
    """
    from datetime import datetime
    sfa_leases = []
    for lease in leases:
        sfa_lease = dict()
        # NOTE(review): in the original this assignment sat next to a
        # commented-out "sfa_lease_id =" fragment; assuming the
        # component_id assignment itself is live — confirm.
        sfa_lease['component_id'] = lease['resource']
        sfa_lease['slice_id'] = slice_urn
        sfa_lease['start_time'] = lease['start_time']

        grain = cls.get_grain()                # in seconds
        min_duration = cls.get_min_duration()  # in seconds

        # We either need end_time or duration.
        # end_time is chosen if both are specified!
        if 'end_time' in lease:
            sfa_lease['end_time'] = lease['end_time']
            # Floor division keeps the result an int under both Python 2
            # and Python 3 (the original relied on Py2 integer '/').
            duration = (int(lease['end_time']) - int(lease['start_time'])) // grain
            # NOTE(review): 'duration' is expressed in grains while
            # min_duration is documented as seconds — possible unit
            # mismatch, TODO confirm against get_min_duration().
            if duration < min_duration:
                raise Exception('duration < min_duration')
            sfa_lease['duration'] = duration
        elif 'duration' in lease:
            sfa_lease['duration'] = lease['duration']
            sfa_lease['end_time'] = lease['start_time'] + lease['duration']
        else:
            raise Exception('Lease specifies neither end_time nor duration')

        # timestamp -> UTC YYYY-MM-DD hh:mm:ss
        Log.tmp("manifold to sfa - convert timestamp %s to UTC", sfa_lease['start_time'])
        sfa_lease['start_time'] = datetime.utcfromtimestamp(int(sfa_lease['start_time'])).strftime('%Y-%m-%d %H:%M:%S')
        Log.tmp("manifold to sfa - convert timestamp to UTC %s", sfa_lease['start_time'])
        sfa_lease['end_time'] = datetime.utcfromtimestamp(int(sfa_lease['end_time'])).strftime('%Y-%m-%d %H:%M:%S')

        sfa_leases.append(sfa_lease)
    return sfa_leases
def _process_leases(cls, leases):
    """
    Normalize SFA lease dictionaries into Manifold lease dictionaries.

    Renames 'component_id' -> 'resource' and 'slice_id' -> 'slice',
    converts 'start_time' from a UTC date string to a unix timestamp,
    and fills in whichever of 'end_time'/'duration' is missing.

    Args:
        leases: iterable of lease dicts (mutated in place).

    Returns:
        list: the processed leases (possibly partial if an error
        occurred mid-way; errors are printed, not raised).
    """
    from datetime import datetime
    import time
    import dateutil.parser
    import calendar
    ret = list()
    try:
        for lease in leases:
            lease['resource'] = lease.pop('component_id')
            lease['slice'] = lease.pop('slice_id')

            # UTC YYYY-MM-DD hh:mm:ss -> timestamp
            Log.tmp("PARSING - convert UTC %s to timestamp", lease['start_time'])
            lease['start_time'] = calendar.timegm(dateutil.parser.parse(lease['start_time']).utctimetuple())
            Log.tmp("PARSING - convert UTC to timestamp %s", lease['start_time'])

            # NOTE(review): assumes 'duration' is always present here;
            # a missing key would raise and be swallowed below — confirm.
            lease['duration'] = int(lease['duration'])
            if 'end_time' in lease:
                lease['end_time'] = int(lease['end_time'])

            if 'end_time' not in lease and set(['start_time', 'duration']) <= set(lease.keys()):
                lease['end_time'] = lease['start_time'] + lease['duration'] * cls.get_grain()
            # NOTE(review): this subset test points the "wrong way"
            # (lease always has extra keys such as 'resource'), so the
            # branch looks unreachable — confirm intended direction.
            elif 'duration' not in lease and set(lease.keys()) <= set(['start_time', 'end_time']):
                # Floor division: explicit '//' preserves the Py2 integer
                # division the original relied on.
                lease['duration'] = (lease['end_time'] - lease['start_time']) // cls.get_grain()

            # XXX GRANULARITY Hardcoded for the moment
            if 'granularity' not in lease:
                lease['granularity'] = cls.get_grain()

            ret.append(lease)
    except Exception as e:
        # Best-effort: report and return what was processed so far.
        print("EEE:: %s" % e)
        import traceback
        traceback.print_exc()
    # BUG FIX: the original built 'ret' but never returned it,
    # so callers always received None.
    return ret
def execute_query(self, namespace, query, annotations, is_deferred=False):
    """
    Build a query plan for 'query', instantiate its gateways and run it.

    Args:
        namespace: platform name restricting the plan, or None for all
            platforms known to this interface.
        query: the Query to execute.
        annotations: dict possibly carrying a 'user' entry, or None.
        is_deferred (bool): forwarded to execute_query_plan.

    Returns:
        Whatever execute_query_plan returns.
    """
    user = annotations.get('user', None) if annotations else None

    # Code duplication with Interface() class
    platform_names = [p['platform'] for p in self.platforms]
    if namespace is not None:
        allowed_platforms = [name for name in platform_names if name == namespace]
    else:
        allowed_platforms = platform_names

    qp = QueryPlan()
    qp.build(query, self.g_3nf, allowed_platforms, self.allowed_capabilities, user)
    Log.tmp("QUERY PLAN")
    qp.dump()

    self.instanciate_gateways(qp, user)
    Log.info("QUERY PLAN:\n%s" % (qp.dump()))

    return self.execute_query_plan(namespace, query, annotations, qp, is_deferred)
def __init__(self, router=None, platform=None, query=None, config=None, user_config=None, user=None, format='record'):
    """
    Constructor. Sets up the SQLAlchemy engine/session for this gateway.

    Args:
        router, platform, query, config, user_config, user: forwarded to
            the parent Gateway constructor; config must carry a 'url'
            entry (SQLAlchemy database URL).
        format (string): 'record' or 'object'; return format of the
            gateway ('object' is discouraged, see the Log below).
    """
    assert format in ['record', 'object'], 'Unknown return format for gateway SQLAlchemy'
    if format == 'object':
        Log.tmp("Objects should not be used")
    self.format = format

    super(SQLAlchemyGateway, self).__init__(router, platform, query, config, user_config, user)

    from manifold.models.base import Base
    # Rebind Base to a declarative base built on top of the project's
    # base class; the model imports below register their tables on it.
    Base = declarative_base(cls=Base)

    # Models — imported for their side effect of declaring tables
    # on Base.metadata (the names themselves are not used here).
    from manifold.models.platform import Platform as DBPlatform
    from manifold.models.user import User as DBUser
    from manifold.models.account import Account as DBAccount
    from manifold.models.session import Session as DBSession

    # pool_recycle avoids stale connections (e.g. MySQL 8h timeout);
    # create_all is idempotent: it only creates missing tables.
    engine = create_engine(config['url'], echo=False, pool_recycle=3600)
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    self.db = Session()
def delete_cache(self, annotations=None):
    """
    Drop the per-user cache entry designated by the annotations.

    Args:
        annotations: dict carrying annotations['user']['user_id'],
            or None (in which case nothing is deleted).

    Best-effort: any failure is printed and swallowed, never raised.
    """
    try:
        Log.tmp("----------> DELETE CACHE PER USER <------------")
        Log.tmp(annotations)
        if annotations is not None:
            user_id = annotations['user']['user_id']
            if user_id in self._cache_user:
                del self._cache_user[user_id]
    except Exception:
        # Narrowed from a bare 'except:' so that SystemExit /
        # KeyboardInterrupt are no longer swallowed; behavior is
        # otherwise unchanged (best-effort, just print the traceback).
        import traceback
        traceback.print_exc()
def _process_resources(cls, resources):
    """
    Run every parsed resource through _process_resource and flatten the
    result into plain, xmlrpc-serializable dictionaries.

    Args:
        resources: iterable of raw resources from the SFAWrap parser.

    Returns:
        list: one dict per resource that _process_resource kept.
    """
    processed = list()
    for raw in resources:
        Log.tmp("LOIC - SFAWrap parser type = %s , resource = %r" % (type(raw), raw))
        converted = cls._process_resource(raw)
        if converted:
            # We suppose we have children of dict that cannot be serialized
            # with xmlrpc, let's make dict
            processed.append(cls.make_dict_rec(converted))
    return processed
def start(self):
    """
    Fetch records stored in the postgresql database according to self.query
    (stub: the actual query is not executed yet, TODO).
    """
    Log.tmp("Received: %s" % self.query)

    # Results of the query (TODO)
    rows = []

    # Send each row to the parent processing node in the AST,
    # then signal the end of the stream.
    for record in Records(rows):
        self.send(record)
    self.send(LastRecord())
def start(self):
    """
    Fetch records stored in the postgresql database according to self.query.

    GET/CREATE actions run the generated SQL directly; other actions
    (UPDATE/DELETE) execute the statement and, if the query has a SELECT
    clause, re-read the affected rows with a follow-up GET query.
    All resulting records are streamed to the parent node, followed by
    a LastRecord marker.
    """
    sql = PostgreSQLGateway.to_sql(self.query)
    Log.tmp(sql)
    if self.query.get_action() in [ACTION_GET, ACTION_CREATE]:
        rows = self.selectall(sql)
    else:
        self.do(sql)  # row count was previously bound but never used
        if self.query.get_select():
            select_query = self.query.copy()
            select_query.action = ACTION_GET
            # For DELETE the rows are gone: clear the filter so the
            # follow-up read does not try to match deleted rows.
            if self.query.get_action() == ACTION_DELETE:
                select_query.filter_by(None)
            select_sql = PostgreSQLGateway.to_sql(select_query)
            rows = self.selectall(select_sql)
        else:
            rows = list()
    # BUG FIX: the original used map(self.send, ...) for its side
    # effects; under Python 3 map() is lazy and would send nothing.
    # An explicit loop is correct under both Python 2 and 3.
    for record in Records(rows):
        self.send(record)
    self.send(LastRecord())
def append_record(self, record):
    """
    Accumulate one record into this cache entry.

    A non-last record is appended to the pending list; the last record
    promotes the pending list to the published records and resets the
    pending list to None (meaning "no query in progress").

    Args:
        record: object exposing is_last() -> bool.

    Raises:
        Exception: re-raised if appending to the pending list fails
            (e.g. it is None because no query is in progress).
    """
    if record.is_last():
        # Move all pending records to records...
        self._records = self._pending_records
        #self._pending_records = list()
        #self._query_started = False # False means no query started
        self._pending_records = None
        # ... and inform interested operators
    else:
        try:
            # Add the records in the pending list...
            self._pending_records.append(record)
            # ... and inform interested operators
        except Exception as e:
            # XXX TO BE FIXED
            # BUG FIX: "Exeption" typo corrected in the log message;
            # 'except X, e' modernized to 'except X as e'.
            Log.tmp("Cache_Entry append_record Exception:", e)
            Log.tmp("record = ", record)
            # TMP CACHE DEBUG
            #import pdb
            #pdb.set_trace()
            raise
def optimize_selection(self, filter):
    """
    Push the predicates of 'filter' down this LEFT JOIN's children
    where possible; predicates that cannot be pushed down are applied
    by a Selection operator wrapping this node.

    Args:
        filter: iterable of predicates to distribute.

    Returns:
        The (possibly new) root operator: either self, or a Selection
        wrapping self when some predicates must stay at this level.
    """
    # LEFT JOIN
    # We are pushing selections down as much as possible:
    # - selection on filters on the left: can push down in the left child
    # - selection on filters on the right: cannot push down unless the field is on both sides
    # - selection on filters on the key / common fields ??? TODO
    parent_filter, left_filter, right_filter = Filter(), Filter(), Filter()
    for predicate in filter:
        Log.tmp("1) predicate.get_field_names() = %s" % predicate.get_field_names())
        Log.tmp("2) self.left.get_query().get_select() = %s" % self.left.get_query().get_select())
        Log.tmp("3) lquery = %s" % self.left.get_query())
        # NOTE(review): these two 'if's are independent — a predicate
        # whose fields are only on the left side is added to left_filter
        # AND falls into the 'else' below (parent_filter). Redundant at
        # best; confirm whether an 'elif' chain was intended.
        if predicate.get_field_names() <= self.left.get_query().get_select():
            left_filter.add(predicate)
        if predicate.get_field_names() <= self.right.get_query().get_select():
            # NOTE(review): pushing a selection into the right child of
            # a LEFT JOIN can change which rows are NULL-extended —
            # confirm this matches the intended join semantics.
            right_filter.add(predicate)
        else:
            parent_filter.add(predicate)

    if left_filter:
        self.left = self.left.optimize_selection(left_filter)
        #selection = Selection(self.left, left_filter)
        #selection.query = self.left.copy().filter_by(left_filter)
        self.left.set_callback(self.left_callback)
        #self.left = selection
    if right_filter:
        self.right = self.right.optimize_selection(right_filter)
        self.right.set_callback(self.right_callback)
    if parent_filter:
        # Keep the remaining predicates above this node: splice a
        # Selection between self and self's previous consumer.
        old_self_callback = self.get_callback()
        selection = Selection(self, parent_filter)
        # XXX do we need to set query here ?
        #selection.query = self.query.copy().filter_by(parent_filter)
        selection.set_callback(old_self_callback)
        return selection
    return self
def main(self):
    """
    \brief Runs a XMLRPC server

    Builds the interface (Forwarder or Router) selected on the command
    line, then serves it over XML-RPC on an SSL-wrapped Twisted reactor.
    """
    Log.info("XMLRPC server daemon (%s) started." % sys.argv[0])

    # NOTE it is important to import those files only after daemonization,
    # since they open files we cannot easily preserve
    from twisted.web import xmlrpc, server

    # SSL support
    from OpenSSL import SSL
    from twisted.internet import ssl #, reactor
    #from twisted.internet.protocol import Factory, Protocol
    #from twisted.internet import reactor

    # This also imports manifold.util.reactor_thread that uses reactor
    from manifold.core.router import Router

    assert not (Options().platform and Options().gateway), "Both gateway and platform cannot be specified at commandline"

    # This imports twisted code so we need to import it locally
    from manifold.core.xmlrpc_api import XMLRPCAPI

    # This should be configurable
    allowed_capabilities = Capabilities()
    allowed_capabilities.selection = True
    allowed_capabilities.projection = True

    # XXX We should harmonize interfaces between Router and Forwarder
    if Options().platform:
        # Single-platform mode: forward everything to that platform.
        platforms = Storage.execute(Query().get('platform'), format='object')
        # We pass a single platform to Forwarder
        platform = [p for p in platforms if p.name == Options().platform][0]
        self.interface = Forwarder(platform, allowed_capabilities)
    elif Options().gateway:
        # Gateway mode: wrap an ad-hoc 'dummy' platform around the
        # requested gateway type.
        # XXX user
        # XXX Change Forwarded initializer
        #DEPRECATED| platform = Platform(u'dummy', Options().gateway, self.get_gateway_config(Options().gateway), 'user')
        platform = Platform(
            platform     = u'dummy',
            gateway_type = Options().gateway,
            config       = self.get_gateway_config(Options().gateway),
            auth_type    = 'user'
        )
        self.interface = Forwarder(platform, allowed_capabilities)
    else:
        # Default: a full Router.
        self.interface = Router()

    try:
        # OpenSSL verification hook: accept/reject peer certificates.
        def verifyCallback(connection, x509, errnum, errdepth, ok):
            if not ok:
                print 'invalid cert from subject:', x509.get_subject()
                print errnum, errdepth
                return False
            else:
                print "Certs are fine", x509, x509.get_subject()
                return True

        ssl_path = Options().ssl_path
        if not ssl_path or not os.path.exists(ssl_path):
            # No server keys: print setup instructions and bail out.
            print ""
            print "You need to generate SSL keys and certificate in '%s' to be able to run manifold" % ssl_path
            print ""
            print "mkdir -p /etc/manifold/keys"
            print "openssl genrsa 1024 > /etc/manifold/keys/server.key"
            print "chmod 400 /etc/manifold/keys/server.key"
            print "openssl req -new -x509 -nodes -sha1 -days 365 -key /etc/manifold/keys/server.key > /etc/manifold/keys/server.cert"
            print ""
            sys.exit(0)

        server_key_file = "%s/server.key" % ssl_path
        server_crt_file = "%s/server.cert" % ssl_path
        Log.tmp("key, cert=", server_key_file, server_crt_file)
        myContextFactory = ssl.DefaultOpenSSLContextFactory(server_key_file, server_crt_file)

        ctx = myContextFactory.getContext()

        ctx.set_verify(
            SSL.VERIFY_PEER, # | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
            verifyCallback
            )

        # Since we have self-signed certs we have to explicitly
        # tell the server to trust them.
        #ctx.load_verify_locations("keys/ca.pem")
        trusted_roots_path = Options().trusted_roots_path
        if not trusted_roots_path or not os.path.exists(trusted_roots_path):
            Log.warning("No trusted root found in %s. You won't be able to login using SSL client certificates" % trusted_roots_path)
        # NOTE(review): this loads verify locations from ssl_path even
        # when trusted_roots_path is missing — confirm intended.
        ctx.load_verify_locations(None, ssl_path)

        #ReactorThread().listenTCP(Options().xmlrpc_port, server.Site(XMLRPCAPI(self.interface, allowNone=True)))
        ReactorThread().listenSSL(Options().xmlrpc_port, server.Site(XMLRPCAPI(self.interface, allowNone=True)), myContextFactory)
        ReactorThread().start_reactor()
    except Exception, e:
        # TODO If database gets disconnected, we can sleep/attempt reconnection
        Log.error("Error in XMLRPC API: %s" % str(e))
def build_rspec(cls, slice_urn, resources, leases, flowspace, vms, rspec_version='GENI 3 request'):
    """
    Builds a RSpecs based on the specified resources and leases.

    Args:
        slice_urn (string): the urn of the slice [UNUSED] [HRN in NITOS ?]
        resources: iterable of resource URNs (None entries are skipped).
        leases: iterable of lease dicts; the first one with an
            'end_time' provides start_time/end_time.
        flowspace, vms: [UNUSED here].
        rspec_version (string): RSpec version string.

    Returns:
        string : the string version of the created RSpec.

    Raises:
        Exception: on a resource type other than node/link/channel.
    """
    import time
    start_time = None
    end_time = None
    # Default duration for WiLab is 2 hours (in minutes)
    duration_default = 120
    for lease in leases:
        if 'end_time' in lease:
            end_time = lease['end_time']
            start_time = lease['start_time']
            break

    if start_time is None:
        # start_time = Now
        start_time = time.time()

    if end_time is None:
        end_time = int(start_time + duration_default * 60)
        #raise Exception, "end_time is mandatory in leases"

    # duration in seconds from now till end_time, then in minutes,
    # clamped to at least the WiLab default.
    duration = int((end_time - start_time) / 60)
    if duration < duration_default:
        duration = duration_default

    Log.tmp("start_time = ", start_time)
    Log.tmp("end_time = ", end_time)
    Log.tmp("duration = ", duration)

    # RSpec will have expires date = now + duration
    rspec = RSpec(version=rspec_version, ttl=duration, expires=end_time)

    nodes = []
    channels = []
    links = []

    # XXX Here it is only about mappings and hooks between ontologies
    i = 0
    for urn in resources:
        # XXX TO BE CORRECTED, this handles None values
        if not urn:
            continue
        resource = dict()
        # TODO: take into account the case where we send a dict of URNs without keys
        #resource['component_id'] = resource.pop('urn')
        resource['component_id'] = urn
        resource_hrn, resource_type = urn_to_hrn(urn) # resource['component_id'])

        # build component_manager_id from the URN's authority part
        cm = urn.split("+")
        resource['component_manager_id'] = "%s+%s+authority+cm" % (cm[0], cm[1])

        if resource_type == 'node':
            resource['client_id'] = "PC" + str(i)
            resource = cls.on_build_resource_hook(resource)
            nodes.append(resource)
        elif resource_type == 'link':
            links.append(resource)
        elif resource_type == 'channel':
            channels.append(resource)
        else:
            # BUG FIX: Python-3-compatible raise syntax
            # (was the Py2-only 'raise Exception, "..."' form).
            raise Exception("Not supported type of resource")
        i = i + 1

    rspec.version.add_nodes(nodes, rspec_content_type="request")
    #rspec.version.add_links(links)
    #rspec.version.add_channels(channels)

    # Kept for reference: leases are currently not added because of a
    # known SFAWrap issue with empty lease sets.
    #sfa_leases = cls.manifold_to_sfa_leases(leases, slice_urn)
    ##print "sfa_leases", sfa_leases
    #if sfa_leases:
    #    # SFAWRAP BUG ???
    #    # rspec.version.add_leases bugs with an empty set of leases
    #    # slice_id = leases[0]['slice_id']
    #    # TypeError: list indices must be integers, not str
    #    rspec.version.add_leases(sfa_leases, []) # XXX Empty channels for now

    return rspec.toxml()
def process(self, query, record, annotation, is_query):
    """
    Log target: trace the query/annotation pair (with a distinct prefix
    when a record is present) and let processing continue.

    Returns:
        tuple: (TargetValue.CONTINUE, None) — never stops the chain.
    """
    if record:
        Log.tmp("LOG TARGET RECORDS: %s %r" % (query, annotation))
    else:
        Log.tmp("LOG TARGET: %s %r" % (query, annotation))
    return (TargetValue.CONTINUE, None)