Example no. 1
    def actual_caller_hrn (self):
        """a helper method used by some API calls like e.g. Allocate
        to try and find out who really is the original caller
        
        This admittedly is a bit of a hack, please USE IN LAST RESORT
        
        This code uses a heuristic to identify a delegated credential

        A first known restriction if for traffic that gets through a slice manager
        in this case the hrn reported is the one from the last SM in the call graph
        which is not at all what is meant here"""

        caller_hrn = self.get_gid_caller().get_hrn()
        issuer_hrn = self.get_signature().get_issuer_gid().get_hrn()
        subject_hrn = self.get_gid_object().get_hrn()
        # if we find that the caller_hrn is an immediate descendant of the issuer, then
        # this seems to be a 'regular' credential
        if caller_hrn.startswith(issuer_hrn): 
            actual_caller_hrn=caller_hrn
        # else this looks like a delegated credential, and the real caller is the issuer
        else:
            actual_caller_hrn=issuer_hrn
        logger.info("actual_caller_hrn: caller_hrn=%s, issuer_hrn=%s, returning %s"
                    %(caller_hrn,issuer_hrn,actual_caller_hrn))
        return actual_caller_hrn
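A minimal, self-contained illustration of the heuristic above (the HRN values are made up and do not come from the SFA code): a caller whose hrn sits directly under the issuer's hrn is treated as holding a regular credential, anything else as a delegated one, in which case the issuer is reported instead.

    def pick_actual_caller(caller_hrn, issuer_hrn):
        # same startswith test as in actual_caller_hrn above
        return caller_hrn if caller_hrn.startswith(issuer_hrn) else issuer_hrn

    print(pick_actual_caller("plc.site.alice", "plc.site"))        # -> plc.site.alice (regular)
    print(pick_actual_caller("plc.site.myslice", "plc.other.bob")) # -> plc.other.bob (delegated)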
Example no. 2
 def init_or_upgrade(self):
     # check if under version control, and initialize it otherwise
     if self.current_version() is None:
         before = "Unknown"
         # can be either a very old version, or a fresh install
         # for very old versions:
         self.handle_old_releases()
         # in any case, initialize db from current code and reflect in migrate
         model.init_tables(self.engine)
         code_version = migrate.version(self.repository)
         migrate.version_control(self.url, self.repository, code_version)
         after = "%s" % self.current_version()
         logger.info("DBSchema : jumped to version %s" % (after))
     else:
         # use migrate in the usual way
         before = "%s" % self.current_version()
         migrate.upgrade(self.url, self.repository)
         after = "%s" % self.current_version()
         if before != after:
             logger.info("DBSchema : upgraded version from %s to %s" %
                         (before, after))
         else:
             logger.debug(
                 "DBSchema : no change needed in db schema (%s==%s)" %
                 (before, after))
Example no. 3
File: model.py Project: aquila/sfa
 def update_pis (self, pi_hrns, dbsession):
     # strip that in case we have <researcher> words </researcher>
     pi_hrns = [ x.strip() for x in pi_hrns ]
     request = dbsession.query (RegUser).filter(RegUser.hrn.in_(pi_hrns))
     logger.info ("RegAuthority.update_pis: %d incoming pis, %d matches found"%(len(pi_hrns),request.count()))
     pis = dbsession.query (RegUser).filter(RegUser.hrn.in_(pi_hrns)).all()
     self.reg_pis = pis
Example no. 4
 def clean(self):
     try:
         retcod = os.unlink(self.filename)
         logger.info("Cleaned up version cache %s, retcod=%d" %
                     (self.filename, retcod))
     except:
         logger.info("Could not unlink version cache %s" % self.filename)
Example no. 5
File: model.py Project: aquila/sfa
 def update_researchers (self, researcher_hrns, dbsession):
     # strip that in case we have <researcher> words </researcher>
     researcher_hrns = [ x.strip() for x in researcher_hrns ]
     request = dbsession.query (RegUser).filter(RegUser.hrn.in_(researcher_hrns))
     logger.info ("RegSlice.update_researchers: %d incoming researchers, %d matches found"%(len(researcher_hrns),request.count()))
     researchers = dbsession.query (RegUser).filter(RegUser.hrn.in_(researcher_hrns)).all()
     self.reg_researchers = researchers
Example no. 6
    def delete_router(self, tenant_id):
        is_router = False
        ports = self.driver.shell.network_manager.list_ports()
        ports = ports['ports']
        networks = self.driver.shell.network_manager.list_networks()
        networks = networks['networks']

        # find the subnet ID needed to remove the interface related to the private network
        # TOPOLOGY: Public Network -- Router -- Private Network -- VM Instance(s)
        for port in ports:
            if (port.get('tenant_id') == tenant_id) and \
               (port.get('device_owner') == 'network:router_interface'):
                router_id = port.get('device_id')
                port_net_id = port.get('network_id')
        for network in networks:
            if network.get('tenant_id') == tenant_id:
                net_id = network.get('id')
                if port_net_id == net_id:
                    sbnet_ids = network.get('subnets')
                    is_router = True

        if is_router:
            # remove the router's interface related to the private network
            if sbnet_ids:
                body = {'subnet_id': sbnet_ids[0]}
                self.driver.shell.network_manager.remove_interface_router(
                                                  router=router_id, body=body)
            # remove the router's interface related to the public network
            self.driver.shell.network_manager.remove_gateway_router(router=router_id)
            # delete the router
            self.driver.shell.network_manager.delete_router(router=router_id)
            logger.info("Deleted the router: Router ID [%s]" % router_id)
        return 1
Example no. 7
 def update_relation(self, subject_type, target_type, relation_name,
                     subject_id, target_ids):
     # hard-wire the code for slice/user for now, could be smarter if needed
     if subject_type == 'slice' and target_type == 'user' and relation_name == 'researcher':
         subject = self.shell.GetSlices({'slice_id': subject_id})[0]
         if 'user_ids' not in subject.keys():
             subject['user_ids'] = []
         current_target_ids = subject['user_ids']
         add_target_ids = list(
             set(target_ids).difference(current_target_ids))
         del_target_ids = list(
             set(current_target_ids).difference(target_ids))
         logger.debug("subject_id = %s (type=%s)" %
                      (subject_id, type(subject_id)))
         for target_id in add_target_ids:
             self.shell.AddUserToSlice({
                 'user_id': target_id,
                 'slice_id': subject_id
             })
             logger.debug("add_target_id = %s (type=%s)" %
                          (target_id, type(target_id)))
         for target_id in del_target_ids:
             logger.debug("del_target_id = %s (type=%s)" %
                          (target_id, type(target_id)))
             self.shell.DeleteUserFromSlice({
                 'user_id': target_id,
                 'slice_id': subject_id
             })
     else:
         logger.info('unexpected relation %s to maintain, %s -> %s' %
                     (relation_name, subject_type, target_type))
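The add/del bookkeeping above is a plain set-difference reconciliation; a small standalone sketch with made-up ids (not from the codebase) shows what the two lists end up containing.

    current_target_ids = [3, 5, 7]     # hypothetical: users currently attached to the slice
    target_ids = [5, 7, 11]            # hypothetical: users that should be attached
    add_target_ids = list(set(target_ids).difference(current_target_ids))
    del_target_ids = list(set(current_target_ids).difference(target_ids))
    print(add_target_ids)              # [11]  -> would be passed to AddUserToSlice
    print(del_target_ids)              # [3]   -> would be passed to DeleteUserFromSlice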
Example no. 8
 def update_relation(self, subject_type, target_type, relation_name,
                     subject_id, target_ids):
     # hard-wire the code for slice/user for now, could be smarter if needed
     if subject_type == 'slice' and target_type == 'user' and relation_name == 'researcher':
         subject = self.shell.GetSlices(subject_id)[0]
         current_target_ids = subject['person_ids']
         add_target_ids = list(
             set(target_ids).difference(current_target_ids))
         del_target_ids = list(
             set(current_target_ids).difference(target_ids))
         logger.debug("subject_id = %s (type=%s)" %
                      (subject_id, type(subject_id)))
         for target_id in add_target_ids:
             self.shell.AddPersonToSlice(target_id, subject_id)
             logger.debug("add_target_id = %s (type=%s)" %
                          (target_id, type(target_id)))
         for target_id in del_target_ids:
             logger.debug("del_target_id = %s (type=%s)" %
                          (target_id, type(target_id)))
             self.shell.DeletePersonFromSlice(target_id, subject_id)
     elif subject_type == 'authority' and target_type == 'user' and relation_name == 'pi':
         # due to plcapi limitations, this essentially means adding the pi role to all people in the list
         # it's tricky to remove any pi role here, although it might be desirable
         persons = self.shell.GetPersons({
             'peer_id': None,
             'person_id': target_ids
         })
         for person in persons:
             if 'pi' not in person['roles']:
                 self.shell.AddRoleToPerson('pi', person['person_id'])
     else:
         logger.info('unexpected relation %s to maintain, %s -> %s' %
                     (relation_name, subject_type, target_type))
Example no. 9
    def node_to_rspec_node(self, node, options={}):
        rspec_node = NodeElement()
        site=self.driver.testbedInfo
        rspec_node['component_id'] = hostname_to_urn(self.driver.hrn, site['name'], node['hostname'])
        rspec_node['component_name'] = node['hostname']
        rspec_node['ip'] = node['ip']
        rspec_node['protocol'] = node['protocol']     
        rspec_node['port'] = node['port']                
        rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
        rspec_node['authority_id'] = hrn_to_urn(unigetestbedXrn.site_hrn(self.driver.hrn, site['name']), 'authority+sa')
        #distinguish between Shared and Reservable nodes
        rspec_node['exclusive'] = 'false'

        rspec_node['hardware_types'] = [HardwareType({'name': 'endpoint'}),
                                        HardwareType({'name': 'sensor'})]
        
        resources = []
        for resource in node['resources']:
            resources.append(Resource({'name':resource.get('name'),'path':resource.get('path'),
                                       'type':resource.get('type'),
                                       'unit':resource.get('unit'),
                                       'data_type':resource.get('datatype')}))
        
        rspec_node['resources'] = resources
        logger.info(rspec_node)
        
        
        if site['longitude'] and site['latitude']:
            location = Location({'longitude': site['longitude'], 'latitude': site['latitude'], 'country': 'unknown'})
            rspec_node['location'] = location
        
        logger.info(rspec_node)
        return rspec_node
Example no. 10
    def update_relation(self, subject_type, target_type, relation_name,
                        subject_id, target_ids):

        if subject_type == 'slice' and target_type == 'user' and relation_name == 'researcher':
            subject = self.shell.getSlices({'slice_id': subject_id}, [])[0]
            current_target_ids = subject['user_ids']
            add_target_ids = list(
                set(target_ids).difference(current_target_ids))
            del_target_ids = list(
                set(current_target_ids).difference(target_ids))
            logger.debug("subject_id = %s (type=%s)" %
                         (subject_id, type(subject_id)))
            for target_id in add_target_ids:
                self.shell.addUserToSlice({
                    'user_id': target_id,
                    'slice_id': subject_id
                })
                logger.debug("add_target_id = %s (type=%s)" %
                             (target_id, type(target_id)))
            for target_id in del_target_ids:
                logger.debug("del_target_id = %s (type=%s)" %
                             (target_id, type(target_id)))
                self.shell.deleteUserFromSlice({
                    'user_id': target_id,
                    'slice_id': subject_id
                })
        else:
            logger.info('unexpected relation %s to maintain, %s -> %s' %
                        (relation_name, subject_type, target_type))
Example no. 11
def install_peer_certs(server_key_file, server_cert_file):
    """
    Attempt to install missing trusted gids and db records for 
    our federated interfaces
    """
    # Attempt to get any missing peer gids
    # There should be a gid file in /etc/sfa/trusted_roots for every
    # peer registry found in the registries.xml config file. If there
    # are any missing gids, request a new one from the peer registry.
    api = SfaAPI(key_file=server_key_file, cert_file=server_cert_file)
    registries = Registries()
    aggregates = Aggregates()
    interfaces = dict(registries.items() + aggregates.items())
    gids_current = api.auth.trusted_cert_list
    hrns_current = [gid.get_hrn() for gid in gids_current]
    hrns_expected = set([hrn for hrn in interfaces])
    new_hrns = set(hrns_expected).difference(hrns_current)
    # gids = self.get_peer_gids(new_hrns) + gids_current
    peer_gids = []
    if not new_hrns:
        return

    trusted_certs_dir = api.config.get_trustedroots_dir()
    for new_hrn in new_hrns:
        if not new_hrn:
            continue
        # the gid for this interface should already be installed
        if new_hrn == api.config.SFA_INTERFACE_HRN:
            continue
        try:
            # get gid from the registry
            url = interfaces[new_hrn].get_url()
            interface = interfaces[new_hrn].get_server(server_key_file, server_cert_file, timeout=30)
            # skip non sfa aggregates
            server_version = api.get_cached_server_version(interface)
            if "sfa" not in server_version:
                logger.info("get_trusted_certs: skipping non sfa aggregate: %s" % new_hrn)
                continue

            trusted_gids = interface.get_trusted_certs()
            if trusted_gids:
                # the gid we want should be the first one in the list,
                # but let's make sure
                for trusted_gid in trusted_gids:
                    # default message
                    message = "interface: %s\t" % (api.interface)
                    message += "unable to install trusted gid for %s" % (new_hrn)
                    gid = GID(string=trusted_gids[0])
                    peer_gids.append(gid)
                    if gid.get_hrn() == new_hrn:
                        gid_filename = os.path.join(trusted_certs_dir, "%s.gid" % new_hrn)
                        gid.save_to_file(gid_filename, save_parents=True)
                        message = "installed trusted cert for %s" % new_hrn
                    # log the message
                    api.logger.info(message)
        except:
            message = "interface: %s\tunable to install trusted gid for %s" % (api.interface, new_hrn)
            api.logger.log_exc(message)
    # doesn't matter which one
    update_cert_records(peer_gids)
Example no. 12
def init_server():
    logger = logging.getLogger('EucaAggregate')
    fileHandler = logging.FileHandler('/var/log/euca.log')
    fileHandler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
    logger.addHandler(fileHandler)
    fileHandler.setLevel(logging.DEBUG)
    logger.setLevel(logging.DEBUG)

    configParser = ConfigParser()
    configParser.read(['/etc/sfa/eucalyptus_aggregate.conf', 'eucalyptus_aggregate.conf'])
    if len(configParser.sections()) < 1:
        logger.error('No cloud defined in the config file')
        raise Exception('Cannot find cloud definition in configuration file.')

    # Only read the first section.
    cloudSec = configParser.sections()[0]
    cloud['name'] = cloudSec
    cloud['access_key'] = configParser.get(cloudSec, 'access_key')
    cloud['secret_key'] = configParser.get(cloudSec, 'secret_key')
    cloud['cloud_url']  = configParser.get(cloudSec, 'cloud_url')
    cloudURL = cloud['cloud_url']
    if cloudURL.find('https://') >= 0:
        cloudURL = cloudURL.replace('https://', '')
    elif cloudURL.find('http://') >= 0:
        cloudURL = cloudURL.replace('http://', '')
    (cloud['ip'], parts) = cloudURL.split(':')

    # Create image bundles
    images = getEucaConnection().get_all_images()
    cloud['images'] = images
    cloud['imageBundles'] = {}
    for i in images:
        if i.type != 'machine' or i.kernel_id is None: continue
        name = os.path.dirname(i.location)
        detail = {'imageID' : i.id, 'kernelID' : i.kernel_id, 'ramdiskID' : i.ramdisk_id}
        cloud['imageBundles'][name] = detail

    # Initialize sqlite3 database and tables.
    dbPath = '/etc/sfa/db'
    dbName = 'euca_aggregate.db'

    if not os.path.isdir(dbPath):
        logger.info('%s not found. Creating directory ...' % dbPath)
        os.mkdir(dbPath)

    conn = connectionForURI('sqlite://%s/%s' % (dbPath, dbName))
    sqlhub.processConnection = conn
    Slice.createTable(ifNotExists=True)
    EucaInstance.createTable(ifNotExists=True)
    Meta.createTable(ifNotExists=True)

    # Start the update process to keep track of the meta data
    # about Eucalyptus instance.
    Process(target=updateMeta).start()

    # Make sure the schema exists.
    if not os.path.exists(EUCALYPTUS_RSPEC_SCHEMA):
        err = 'Cannot locate schema at %s' % EUCALYPTUS_RSPEC_SCHEMA
        logger.error(err)
        raise Exception(err)
Example no. 13
    def reserveInstance(self, botoConn, pubKeys):
        logger = logging.getLogger('EucaAggregate')
        logger.info('Reserving an instance: image: %s, kernel: ' \
                    '%s, ramdisk: %s, type: %s, key: %s' % \
                    (self.image_id, self.kernel_id, self.ramdisk_id,
                    self.inst_type, self.key_pair))

        # XXX The return statement is for testing. REMOVE in production
        #return

        try:
            reservation = botoConn.run_instances(self.image_id,
                                                 kernel_id = self.kernel_id,
                                                 ramdisk_id = self.ramdisk_id,
                                                 instance_type = self.inst_type,
                                                 key_name  = self.key_pair,
                                                 user_data = pubKeys)
            for instance in reservation.instances:
                self.instance_id = instance.id

        # If there is an error, destroy itself.
        except EC2ResponseError, ec2RespErr:
            errTree = ET.fromstring(ec2RespErr.body)
            msg = errTree.find('.//Message')
            logger.error(msg.text)
            self.destroySelf()
Example no. 14
    def get_euca_connection(self, project_name=None):
        if not has_boto:
            logger.info('Unable to access EC2 API - boto library not found.')
            return None

        if not self.access_key or not self.secret_key:
            self.init_context(project_name)
        
        url = self.config.SFA_NOVA_API_URL
        host = None
        port = None    
        path = "/"
        use_ssl = False
        # Split the url into parts 
        if url.find('https://') >= 0:
            use_ssl  = True
            url = url.replace('https://', '')
        elif url.find('http://') >= 0:
            use_ssl  = False
            url = url.replace('http://', '')
        parts = url.split(':')
        host = parts[0]
        if len(parts) > 1:
            parts = parts[1].split('/')
            port = int(parts[0])
            parts = parts[1:]
            path = '/'+'/'.join(parts)
        return boto.connect_ec2(aws_access_key_id=self.access_key,
                                aws_secret_access_key=self.secret_key,
                                is_secure=use_ssl,
                                region=RegionInfo(None, 'eucalyptus', host),
                                host=host,
                                port=port,
                                path=path) 
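As a side note, the manual scheme/host/port/path splitting above could also be done with the standard library's urlparse; this is only a sketch (the import path shown is the Python 3 one, and the example URL is invented), not how the driver itself does it.

    from urllib.parse import urlparse   # under Python 2: from urlparse import urlparse

    def split_api_url(url):
        parsed = urlparse(url)
        use_ssl = (parsed.scheme == 'https')
        host = parsed.hostname
        port = parsed.port               # None when the URL carries no explicit port
        path = parsed.path or '/'
        return use_ssl, host, port, path

    print(split_api_url('https://nova.example.org:8773/services/Eucalyptus'))
    # -> (True, 'nova.example.org', 8773, '/services/Eucalyptus')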
Example no. 15
    def get_rspec(self, slice_xrn=None, version = None, options={}):

        version_manager = VersionManager()
        version = version_manager.get_version(version)
        if not slice_xrn:
            rspec_version = version_manager._get_version(version.type, version.version, 'ad')
        else:
            rspec_version = version_manager._get_version(version.type, version.version, 'manifest')

        slice, slivers = self.get_slice_and_slivers(slice_xrn)
        rspec = RSpec(version=rspec_version, user_options=options)
        if slice and 'expires' in slice:
            rspec.xml.set('expires',  datetime_to_string(utcparse(slice['expires'])))

        nodes = self.get_nodes(slice_xrn, slice, slivers, options)
        rspec.version.add_nodes(nodes)
        # add sliver defaults
        default_sliver = slivers.get(None, [])
        if default_sliver:
            default_sliver_attribs = default_sliver.get('tags', [])
            for attrib in default_sliver_attribs:
                 logger.info(attrib)
                 rspec.version.add_default_sliver_attribute(attrib['tagname'], attrib['value'])
        
        return rspec.toxml()
Example no. 16
 def init_compute_manager_conn(self):
     from sfa.util.config import Config 
     import sfa.openstack.client as os_client
     opts = os_client.parse_accrc(Config().SFA_NOVA_NOVARC)
     self.shell.compute_manager.connect( username=opts.get('OS_USERNAME'),  \
                                         tenant=opts.get('OS_TENANT_NAME'), \
                                         password=opts.get('OS_PASSWORD')   )
     logger.info( "Initialize Openstack connection from novarc file." )
Example no. 17
 def update_pis(self, pi_hrns, dbsession):
     # strip that in case we have <researcher> words </researcher>
     pi_hrns = [x.strip() for x in pi_hrns]
     request = dbsession.query(RegUser).filter(RegUser.hrn.in_(pi_hrns))
     logger.info(
         "RegAuthority.update_pis: %d incoming pis, %d matches found" %
         (len(pi_hrns), request.count()))
     pis = dbsession.query(RegUser).filter(RegUser.hrn.in_(pi_hrns)).all()
     self.reg_pis = pis
Example no. 18
 def createCred(credString=None, credFile=None):
     if not credString and not credFile:
         raise Exception("CredentialFactory.createCred called with no argument")
     if credFile:
         try:
             credString = open(credFile).read()
         except Exception, e:
             logger.info("Error opening credential file %s: %s" % credFile, e)
             return None
Example no. 19
        def func(*args, **kwds):
            if name not in FdShell.direct_calls:
                raise Exception, "Illegal method call %s for FEDERICA driver"%(name)
            logger.info("Issuing %s args=%s kwds=%s to federica"%\
                            (name,args,kwds))
#            result=getattr(self.proxy, "AggregateManager.%s"%name)(credential, *args, **kwds)
            result=getattr(self.proxy, "AggregateManager.%s"%name)(*args, **kwds)
            logger.debug('FdShell %s (%s) returned ... '%(name,name))
            return result
Example no. 20
    def scan(self, interfaces, graph):
        if not isinstance(interfaces, list):
            interfaces = [interfaces]

        # remember node to interface mapping
        node2interface = {}
        # add entry points right away using the interface uid's as a key
        to_scan = interfaces
        for i in interfaces:
            graph.add_node(i.uid())
            node2interface[graph.get_node(i.uid())] = i
        scanned = []
        # keep on looping until we reach a fixed point
        # don't worry about labels and shapes that will get fixed later on
        while to_scan:
            for interface in to_scan:
                # performing xmlrpc call
                logger.info("retrieving/fetching version at interface %s" %
                            interface.url())
                version = interface.get_version()
                if not version:
                    logger.info(
                        "<EMPTY GetVersion(); offline or cannot authenticate>")
                else:
                    for (k, v) in version.iteritems():
                        if not isinstance(v, dict):
                            logger.debug("\r\t%s:%s" % (k, v))
                        else:
                            logger.debug(k)
                            for (k1, v1) in v.iteritems():
                                logger.debug("\r\t\t%s:%s" % (k1, v1))
                # proceed with neighbours
                if 'peers' in version:
                    for (next_name, next_url) in version['peers'].iteritems():
                        next_interface = Interface(
                            next_url, mentioned_in=interface.url())
                        # locate or create node in graph
                        try:
                            # if found, we're good with this one
                            next_node = graph.get_node(next_interface.uid())
                        except:
                            # otherwise, let's move on with it
                            graph.add_node(next_interface.uid())
                            next_node = graph.get_node(next_interface.uid())
                            node2interface[next_node] = next_interface
                            to_scan.append(next_interface)
                        graph.add_edge(interface.uid(), next_interface.uid())
                scanned.append(interface)
                to_scan.remove(interface)
            # we've scanned the whole graph, let's get the labels and shapes right
            for node in graph.nodes():
                interface = node2interface.get(node, None)
                if interface:
                    for (k, v) in interface.get_layout().iteritems():
                        node.attr[k] = v
                else:
                    logger.error("MISSED interface with node %s" % node)
Example no. 21
File: model.py Project: tubav/sfa
 def update_pis (self, pi_hrns):
     # don't ruin the import of that file in a client world
     from sfa.storage.alchemy import dbsession
     # strip that in case we have <researcher> words </researcher>
     pi_hrns = [ x.strip() for x in pi_hrns ]
     request = dbsession.query (RegUser).filter(RegUser.hrn.in_(pi_hrns))
     logger.info ("RegAuthority.update_pis: %d incoming pis, %d matches found"%(len(pi_hrns),request.count()))
     pis = dbsession.query (RegUser).filter(RegUser.hrn.in_(pi_hrns)).all()
     self.reg_pis = pis
Example no. 22
    def scan(self,interfaces,graph):
        if not isinstance(interfaces,list):
            interfaces=[interfaces]

        # remember node to interface mapping
        node2interface={}
        # add entry points right away using the interface uid's as a key
        to_scan=interfaces
        for i in interfaces: 
            graph.add_node(i.uid())
            node2interface[graph.get_node(i.uid())]=i
        scanned=[]
        # keep on looping until we reach a fixed point
        # don't worry about labels and shapes that will get fixed later on
        while to_scan:
            for interface in to_scan:
                # performing xmlrpc call
                version=interface.get_version()
                if self.verbose:
                    logger.info("GetVersion at interface %s"%interface.url())
                    if not version:
                        logger.info("<EMPTY GetVersion(); offline or cannot authenticate>")
                    else: 
                        for (k,v) in version.iteritems(): 
                            if not isinstance(v,dict):
                                logger.info("\r\t%s:%s"%(k,v))
                            else:
                                logger.info(k)
                                for (k1,v1) in v.iteritems():
                                    logger.info("\r\t\t%s:%s"%(k1,v1))
                # 'geni_api' is expected if the call succeeded at all
                # 'peers' is needed as well, since AMs typically don't have peers
                if 'geni_api' in version and 'peers' in version: 
                    # proceed with neighbours
                    for (next_name,next_url) in version['peers'].iteritems():
                        next_interface=Interface(next_url)
                        # locate or create node in graph
                        try:
                            # if found, we're good with this one
                            next_node=graph.get_node(next_interface.uid())
                        except:
                            # otherwise, let's move on with it
                            graph.add_node(next_interface.uid())
                            next_node=graph.get_node(next_interface.uid())
                            node2interface[next_node]=next_interface
                            to_scan.append(next_interface)
                        graph.add_edge(interface.uid(),next_interface.uid())
                scanned.append(interface)
                to_scan.remove(interface)
            # we've scanned the whole graph, let's get the labels and shapes right
            for node in graph.nodes():
                interface=node2interface.get(node,None)
                if interface:
                    for (k,v) in interface.get_layout().iteritems():
                        node.attr[k]=v
                else:
                    logger.error("MISSED interface with node %s"%node)
Example no. 23
File: model.py Project: tubav/sfa
 def update_researchers (self, researcher_hrns):
     # don't ruin the import of that file in a client world
     from sfa.storage.alchemy import dbsession
     # strip that in case we have <researcher> words </researcher>
     researcher_hrns = [ x.strip() for x in researcher_hrns ]
     request = dbsession.query (RegUser).filter(RegUser.hrn.in_(researcher_hrns))
     logger.info ("RegSlice.update_researchers: %d incoming researchers, %d matches found"%(len(researcher_hrns),request.count()))
     researchers = dbsession.query (RegUser).filter(RegUser.hrn.in_(researcher_hrns)).all()
     self.reg_researchers = researchers
Example no. 24
    def fill_record_sfa_info(self, records):
        def startswith(prefix, values):
            return [value for value in values if value.startswith(prefix)]

        # get user ids
        user_ids = []
        for record in records:
            user_ids.extend(record.get("user_ids", []))

        # get the registry records
        user_list, users = [], {}
        user_list = self.api.dbsession().query(RegRecord).filter(
            RegRecord.pointer.in_(user_ids)).all()
        # create a dict of hrns keyed on the sfa record's pointer.
        # It's possible for multiple records to have the same pointer, so
        # the dict's value will be a list of hrns.
        users = defaultdict(list)
        for user in user_list:
            users[user.pointer].append(user)

        # get the nitos records
        nitos_user_list, nitos_users = [], {}
        nitos_all_users = self.convert_id(self.shell.getUsers())
        nitos_user_list = [
            user for user in nitos_all_users if user['user_id'] in user_ids
        ]
        nitos_users = list_to_dict(nitos_user_list, 'user_id')

        # fill sfa info
        for record in records:
            if record['pointer'] == -1:
                continue

            sfa_info = {}
            type = record['type']
            logger.info("fill_record_sfa_info - incoming record typed %s" %
                        type)
            if (type == "slice"):
                # all slice users are researchers
                record['geni_urn'] = hrn_to_urn(record['hrn'], 'slice')
                record['researcher'] = []
                for user_id in record.get('user_ids', []):
                    hrns = [user.hrn for user in users[user_id]]
                    record['researcher'].extend(hrns)

            elif (type == "node"):
                sfa_info['dns'] = record.get("hostname", "")
                # xxx TODO: URI, LatLong, IP, DNS

            elif (type == "user"):
                logger.info('setting user.email')
                sfa_info['email'] = record.get("email", "")
                sfa_info['geni_urn'] = hrn_to_urn(record['hrn'], 'user')
                sfa_info['geni_certificate'] = record['gid']
                # xxx TODO: PostalAddress, Phone
            record.update(sfa_info)
Example no. 25
 def __init__(self, ip, port, key_file, cert_file,interface):
     threading.Thread.__init__(self)
     self.key = Keypair(filename = key_file)
     self.cert = Certificate(filename = cert_file)
     #self.server = SecureXMLRPCServer((ip, port), SecureXMLRpcRequestHandler, key_file, cert_file)
     self.server = ThreadedServer((ip, port), SecureXMLRpcRequestHandler, key_file, cert_file)
     self.server.interface=interface
     self.trusted_cert_list = None
     self.register_functions()
     logger.info("Starting SfaServer, interface=%s"%interface)
Example no. 26
 def log_invalid_cred(cred):
     if not isinstance (cred, StringTypes):
         logger.info("cannot validate credential %s - expecting a string"%cred)
         error="checkCredentials: expected a string, received %s"%(type(cred))
     else:
         cred_obj=Credential(string=cred)
         logger.info("failed to validate credential - dump=%s"%\
                     cred_obj.dump_string(dump_parents=True))
         error = sys.exc_info()[:2]
     return error
Example no. 27
 def __init__(self, ip, port, key_file, cert_file, interface):
     threading.Thread.__init__(self)
     self.key = Keypair(filename=key_file)
     self.cert = Certificate(filename=cert_file)
     #self.server = SecureXMLRPCServer((ip, port), SecureXMLRpcRequestHandler, key_file, cert_file)
     self.server = ThreadedServer(
         (ip, int(port)), SecureXMLRpcRequestHandler, key_file, cert_file)
     self.server.interface = interface
     self.trusted_cert_list = None
     self.register_functions()
     logger.info("Starting SfaServer, interface=%s" % interface)
Example no. 28
    def fill_record_sfa_info(self, records):
        
        def startswith(prefix, values):
            return [value for value in values if value.startswith(prefix)]

        # get user ids
        user_ids = []
        for record in records:
            user_ids.extend(record.get("user_ids", []))
        
        # get the registry records
        user_list, users = [], {}
        user_list = dbsession.query(RegRecord).filter(RegRecord.pointer.in_(user_ids)).all()
        # create a dict of hrns keyed on the sfa record's pointer.
        # It's possible for multiple records to have the same pointer, so
        # the dict's value will be a list of hrns.
        users = defaultdict(list)
        for user in user_list:
            users[user.pointer].append(user)

        # get the nitos records
        nitos_user_list, nitos_users = [], {}
        nitos_all_users = self.convert_id(self.shell.getUsers())
        nitos_user_list = [user for user in nitos_all_users if user['user_id'] in user_ids]
        nitos_users = list_to_dict(nitos_user_list, 'user_id')


        # fill sfa info
        for record in records:
            if record['pointer'] == -1:
                continue 

            sfa_info = {}
            type = record['type']
            logger.info("fill_record_sfa_info - incoming record typed %s"%type)
            if (type == "slice"):
                # all slice users are researchers
                record['geni_urn'] = hrn_to_urn(record['hrn'], 'slice')
                record['researcher'] = []
                for user_id in record.get('user_ids', []):
                    hrns = [user.hrn for user in users[user_id]]
                    record['researcher'].extend(hrns)                
                
            elif (type == "node"):
                sfa_info['dns'] = record.get("hostname", "")
                # xxx TODO: URI, LatLong, IP, DNS
    
            elif (type == "user"):
                logger.info('setting user.email')
                sfa_info['email'] = record.get("email", "")
                sfa_info['geni_urn'] = hrn_to_urn(record['hrn'], 'user')
                sfa_info['geni_certificate'] = record['gid'] 
                # xxx TODO: PostalAddress, Phone
            record.update(sfa_info)
Example no. 29
        def GetSlices(self, filter={}):
            logger.info("GETSlices")
            logger.info(filter)
            result = []
            result.extend(self.slices)
            if 'slice_name' in filter:
                for slice in self.slices:
                    if slice['slice_name'] not in filter['slice_name']:
                        result.remove(slice)
            return result
Example no. 30
 def func(*args, **kwds):
     if name not in FdShell.direct_calls:
         raise Exception, "Illegal method call %s for FEDERICA driver" % (
             name)
     logger.info("Issuing %s args=%s kwds=%s to federica"%\
                     (name,args,kwds))
     #            result=getattr(self.proxy, "AggregateManager.%s"%name)(credential, *args, **kwds)
     result = getattr(self.proxy, "AggregateManager.%s" % name)(*args,
                                                                **kwds)
     logger.debug('FdShell %s (%s) returned ... ' % (name, name))
     return result
Example no. 31
 def update_researchers(self, researcher_hrns, dbsession):
     # strip that in case we have <researcher> words </researcher>
     researcher_hrns = [x.strip() for x in researcher_hrns]
     request = dbsession.query(RegUser).filter(
         RegUser.hrn.in_(researcher_hrns))
     logger.info(
         "RegSlice.update_researchers: %d incoming researchers, %d matches found"
         % (len(researcher_hrns), request.count()))
     researchers = dbsession.query(RegUser).filter(
         RegUser.hrn.in_(researcher_hrns)).all()
     self.reg_researchers = researchers
Example no. 32
    def LdapAddUser(self, record) :
        """Add SFA user to LDAP if it is not in LDAP  yet.

        :param record: dictionary with the user's data.
        :returns: a dictionary with the status (Fail= False, Success= True)
            and the uid of the newly added user if successful, or the error
            message if it is not. The dict has keys 'bool' and 'message' in
            case of failure, and 'bool' and 'uid' in case of success.
        :rtype: dict

        .. seealso:: make_ldap_filters_from_record

        """
        logger.debug(" \r\n \t LDAP LdapAddUser \r\n\r\n ================\r\n ")
        user_ldap_attrs = self.make_ldap_attributes_from_record(record)

        # Check if user is already in LDAP with email, first name and last name
        filter_by = self.make_ldap_filters_from_record(user_ldap_attrs)
        user_exist = self.LdapSearch(filter_by)
        if user_exist:
            logger.warning(" \r\n \t LDAP LdapAddUser user %s %s \
                        already exists" % (user_ldap_attrs['sn'],
                           user_ldap_attrs['mail']))
            return {'bool': False}

        #Bind to the server
        result = self.conn.connect()

        if(result['bool']):

            # A dict to help build the "body" of the object
            logger.debug(" \r\n \t LDAP LdapAddUser attrs %s "
                         % user_ldap_attrs)

            # The dn of our new entry/object
            dn = 'uid=' + user_ldap_attrs['uid'] + "," + self.baseDN

            try:
                ldif = modlist.addModlist(user_ldap_attrs)
                logger.debug("LDAPapi.py add attrs %s \r\n  ldif %s"
                             % (user_ldap_attrs, ldif))
                self.conn.ldapserv.add_s(dn, ldif)

                logger.info("Adding user %s login %s in LDAP"
                            % (user_ldap_attrs['cn'], user_ldap_attrs['uid']))
            except ldap.LDAPError, error:
                logger.log_exc("LDAP Add Error %s" % error)
                return {'bool': False, 'message': error}

            self.conn.close()
            return {'bool': True, 'uid': user_ldap_attrs['uid']}
Example no. 33
        def AddUserToSlice(self, param):
            logger.info("Add user to slice")
            logger.info(param)
            if not isinstance(param, dict):
                return False
            try:
                for slice in self.slices:
                    if param['slice_id'] == slice['slice_id']:
                        if 'user_ids' not in slice:
                            slice['user_ids'] = []
                        slice['user_ids'].append(param['user_id'])
                        return True
                return False
            except:
                return False
Example no. 34
        def AddSliceToNodes(self, param):
            logger.info("AddSliceToNode")
            logger.info(param)
            if not isinstance(param, dict):
                return False
            try:
                for slice in self.slices:
                    if param['slice_id'] == slice['slice_id']:
                        if 'node_ids' not in slice:
                            slice['node_ids'] = []
                        slice['node_ids'].extend(param['node_ids'])
                        return True
                return False
            except:
                return False
Example no. 35
    def create_user(self, user_name, password, tenant_id, email=None, enabled=True):
        if password is None:
            logger.warning("If you want to make a user, you should include your password!!")
            raise ValueError('You should include your password!!')

        users = self.driver.shell.auth_manager.users.findall()
        for user in users:
            if user_name == user.name:
                user_info = user
                logger.info("The user name[%s] already exists." % user_name)
                break
        else:
            user_info = self.driver.shell.auth_manager.users.create(user_name, password, \
                                                             email, tenant_id, enabled)
        return user_info
Example no. 36
def updateMeta():
    logger = logging.getLogger('EucaMeta')
    fileHandler = logging.FileHandler('/var/log/euca_meta.log')
    fileHandler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
    logger.addHandler(fileHandler)
    fileHandler.setLevel(logging.DEBUG)
    logger.setLevel(logging.DEBUG)

    while True:
        sleep(30)

        # Get IDs of the instances that don't have IPs yet.
        dbResults = Meta.select(
                      AND(Meta.q.pri_addr == None,
                          Meta.q.state    != 'deleted')
                    )
        dbResults = list(dbResults)
        logger.debug('[update process] dbResults: %s' % dbResults)
        instids = []
        for r in dbResults:
            if not r.instance:
                continue
            instids.append(r.instance.instance_id)
        logger.debug('[update process] Instance Id: %s' % ', '.join(instids))

        # Get instance information from Eucalyptus
        conn = getEucaConnection()
        vmInstances = []
        reservations = conn.get_all_instances(instids)
        for reservation in reservations:
            vmInstances += reservation.instances

        # Check the IPs
        instIPs = [ {'id':i.id, 'pri_addr':i.private_dns_name, 'pub_addr':i.public_dns_name}
                    for i in vmInstances if i.private_dns_name != '0.0.0.0' ]
        logger.debug('[update process] IP dict: %s' % str(instIPs))

        # Update the local DB
        for ipData in instIPs:
            dbInst = EucaInstance.select(EucaInstance.q.instance_id == ipData['id']).getOne(None)
            if not dbInst:
                logger.info('[update process] Could not find %s in DB' % ipData['id'])
                continue
            dbInst.meta.pri_addr = ipData['pri_addr']
            dbInst.meta.pub_addr = ipData['pub_addr']
            dbInst.meta.state    = 'running'

        dumpInstanceInfo()
Example no. 37
 def update_relation (self, subject_type, target_type, relation_name, subject_id, target_ids):
     
     if subject_type =='slice' and target_type == 'user' and relation_name == 'researcher':
         subject=self.shell.getSlices ({'slice_id': subject_id}, [])[0]
         current_target_ids = subject['user_ids']
         add_target_ids = list ( set (target_ids).difference(current_target_ids))
         del_target_ids = list ( set (current_target_ids).difference(target_ids))
         logger.debug ("subject_id = %s (type=%s)"%(subject_id,type(subject_id)))
         for target_id in add_target_ids:
             self.shell.addUserToSlice ({'user_id': target_id, 'slice_id': subject_id})
             logger.debug ("add_target_id = %s (type=%s)"%(target_id,type(target_id)))
         for target_id in del_target_ids:
             logger.debug ("del_target_id = %s (type=%s)"%(target_id,type(target_id)))
             self.shell.deleteUserFromSlice ({'user_id': target_id, 'slice_id': subject_id})
     else:
         logger.info('unexpected relation %s to maintain, %s -> %s'%(relation_name,subject_type,target_type))
Example no. 38
 def update_relation (self, subject_type, target_type, subject_id, target_ids):
     # hard-wire the code for slice/user for now, could be smarter if needed
     if subject_type =='slice' and target_type == 'user':
         subject=self.shell.project_get(subject_id)[0]
         current_target_ids = [user.name for user in subject.members]
         add_target_ids = list ( set (target_ids).difference(current_target_ids))
         del_target_ids = list ( set (current_target_ids).difference(target_ids))
         logger.debug ("subject_id = %s (type=%s)"%(subject_id,type(subject_id)))
         for target_id in add_target_ids:
             self.shell.project_add_member(target_id,subject_id)
             logger.debug ("add_target_id = %s (type=%s)"%(target_id,type(target_id)))
         for target_id in del_target_ids:
             logger.debug ("del_target_id = %s (type=%s)"%(target_id,type(target_id)))
             self.shell.project_remove_member(target_id, subject_id)
     else:
         logger.info('unexpected relation to maintain, %s -> %s'%(subject_type,target_type))
Example no. 39
    def node_to_rspec_node(self, node, options={}):
        rspec_node = NodeElement()
        site = self.driver.testbedInfo
        rspec_node['component_id'] = hostname_to_urn(self.driver.hrn,
                                                     site['name'],
                                                     node['hostname'])
        rspec_node['component_name'] = node['hostname']
        rspec_node['ip'] = node['ip']
        rspec_node['protocol'] = node['protocol']
        rspec_node['port'] = node['port']
        rspec_node['component_manager_id'] = Xrn(self.driver.hrn,
                                                 'authority+cm').get_urn()
        rspec_node['authority_id'] = hrn_to_urn(
            unigetestbedXrn.site_hrn(self.driver.hrn, site['name']),
            'authority+sa')
        #distinguish between Shared and Reservable nodes
        rspec_node['exclusive'] = 'false'

        rspec_node['hardware_types'] = [
            HardwareType({'name': 'endpoint'}),
            HardwareType({'name': 'sensor'})
        ]

        resources = []
        for resource in node['resources']:
            resources.append(
                Resource({
                    'name': resource.get('name'),
                    'path': resource.get('path'),
                    'type': resource.get('type'),
                    'unit': resource.get('unit'),
                    'data_type': resource.get('datatype')
                }))

        rspec_node['resources'] = resources
        logger.info(rspec_node)

        if site['longitude'] and site['latitude']:
            location = Location({
                'longitude': site['longitude'],
                'latitude': site['latitude'],
                'country': 'unknown'
            })
            rspec_node['location'] = location

        logger.info(rspec_node)
        return rspec_node
Example no. 40
    def get_version(self):
        ### if we already know the answer:
        if self.probed:
            return self._version
        ### otherwise let's look in the cache file
        logger.debug("searching in version cache %s" % self.url())
        cached_version = VersionCache().get(self.url())
        if cached_version is not None:
            logger.info("Retrieved version info from cache %s" % self.url())
            return cached_version
        ### otherwise let's do the hard work
        # dummy to meet Sfi's expectations for its 'options' field
        class DummyOptions:
            pass

        options = DummyOptions()
        options.verbose = self.verbose
        options.timeout = 10
        try:
            client = Sfi(options)
            client.read_config()
            client.bootstrap()
            key_file = client.private_key
            cert_file = client.my_gid
            logger.debug("using key %s & cert %s" % (key_file, cert_file))
            url = self.url()
            logger.info('issuing GetVersion at %s' % url)
            # setting timeout here seems to get the call to fail - even though the response time is fast
            #server=SfaServerProxy(url, key_file, cert_file, verbose=self.verbose, timeout=options.timeout)
            server = SfaServerProxy(url,
                                    key_file,
                                    cert_file,
                                    verbose=self.verbose)
            self._version = ReturnValue.get_value(server.GetVersion())
        except:
            logger.log_exc("failed to get version")
            self._version = {}
        # so that next run from this process will find out
        self.probed = True
        # store in version cache so next processes will remember for an hour
        cache = VersionCache()
        cache.set(self.url(), self._version)
        cache.save()
        logger.debug("Saved version for url=%s in version cache" % self.url())
        # that's our result
        return self._version
Example no. 41
    def create_router(self, tenant_id):
        is_router = True
        # check whether a router already exists for this tenant
        routers = self.driver.shell.network_manager.list_routers()
        routers = routers['routers']
        for router in routers:
            if router.get('tenant_id') == tenant_id:
                router_id = router.get('id')
                router = self.driver.shell.network_manager.show_router(router_id)
                is_router = False

        if is_router:
            config = OSConfig()
            # Information of public network(external network) from configuration file
            extnet_name = config.get('network', 'external_network_name')
            # find the network information related with a new interface
            networks = self.driver.shell.network_manager.list_networks()
            networks = networks['networks']
            for network in networks:
                if (network.get('name') == extnet_name) or \
                   (network.get('name') == 'public') or (network.get('name') == 'ext-net'):
                    pub_net_id = network.get('id')

            # Information of subnet network name from configuration file
            subnet_name = config.get('subnet', 'name')
            subnets = self.driver.shell.network_manager.list_subnets()
            subnets = subnets['subnets']
            for subnet in subnets:
                if ((subnet.get('name') == subnet_name) or (subnet.get('name') == 'private-subnet')) and \
                   (subnet.get('tenant_id') == tenant_id):
                    pri_sbnet_id = subnet.get('id')

            # create a router and connect its external gateway to the public network
            r_body = {'router': {'name': 'router', 'admin_state_up': True,
                                 'external_gateway_info':{'network_id': pub_net_id}}}
            router = self.driver.shell.network_manager.create_router(body=r_body)

            # create an internal port on the router
            router_id = router['router']['id']
            int_pt_body = {'subnet_id': pri_sbnet_id}
            int_port = self.driver.shell.network_manager.add_interface_router(
                                                         router=router_id, body=int_pt_body)
            logger.info("Created a router with interfaces")

        return router
Example no. 42
 def update_relation(self, subject_type, target_type, relation_name, subject_id, target_ids):
     # hard-wire the code for slice/user for now, could be smarter if needed
     if subject_type == "slice" and target_type == "user" and relation_name == "researcher":
         subject = self.shell.GetSlices({"slice_id": subject_id})[0]
         if "user_ids" not in subject.keys():
             subject["user_ids"] = []
         current_target_ids = subject["user_ids"]
         add_target_ids = list(set(target_ids).difference(current_target_ids))
         del_target_ids = list(set(current_target_ids).difference(target_ids))
         logger.debug("subject_id = %s (type=%s)" % (subject_id, type(subject_id)))
         for target_id in add_target_ids:
             self.shell.AddUserToSlice({"user_id": target_id, "slice_id": subject_id})
             logger.debug("add_target_id = %s (type=%s)" % (target_id, type(target_id)))
         for target_id in del_target_ids:
             logger.debug("del_target_id = %s (type=%s)" % (target_id, type(target_id)))
             self.shell.DeleteUserFromSlice({"user_id": target_id, "slice_id": subject_id})
     else:
         logger.info("unexpected relation %s to maintain, %s -> %s" % (relation_name, subject_type, target_type))
Example no. 43
File: model.py Project: aquila/sfa
def make_record_dict (record_dict):
    assert ('type' in record_dict)
    type=record_dict['type'].split('+')[0]
    if type=='authority':
        result=RegAuthority (dict=record_dict)
    elif type=='user':
        result=RegUser (dict=record_dict)
    elif type=='slice':
        result=RegSlice (dict=record_dict)
    elif type=='node':
        result=RegNode (dict=record_dict)
    else:
        logger.debug("Untyped RegRecord instance")
        result=RegRecord (dict=record_dict)
    logger.info ("converting dict into Reg* with type=%s"%type)
    logger.info ("returning=%s"%result)
    # xxx todo
    # register non-db attributes in an extensions field
    return result
Example no. 44
def make_record_dict(record_dict):
    assert ('type' in record_dict)
    type = record_dict['type'].split('+')[0]
    if type == 'authority':
        result = RegAuthority(dict=record_dict)
    elif type == 'user':
        result = RegUser(dict=record_dict)
    elif type == 'slice':
        result = RegSlice(dict=record_dict)
    elif type == 'node':
        result = RegNode(dict=record_dict)
    else:
        logger.debug("Untyped RegRecord instance")
        result = RegRecord(dict=record_dict)
    logger.info("converting dict into Reg* with type=%s" % type)
    logger.info("returning=%s" % result)
    # xxx todo
    # register non-db attributes in an extensions field
    return result
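The if/elif chain above is a small type-to-class factory; the same dispatch can be expressed with a lookup table that falls back to RegRecord for unknown types. This is only a sketch and relies on the Reg* classes already imported by the surrounding module; the logging calls are left out.

    _RECORD_CLASSES = {
        'authority': RegAuthority,
        'user': RegUser,
        'slice': RegSlice,
        'node': RegNode,
    }

    def make_record_dict(record_dict):
        assert 'type' in record_dict
        record_type = record_dict['type'].split('+')[0]
        # unknown types fall back to the untyped RegRecord, as in the original
        cls = _RECORD_CLASSES.get(record_type, RegRecord)
        return cls(dict=record_dict)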
Example no. 45
    def get_rspec(self, slice_xrn=None, version=None, options={}):

        version_manager = VersionManager()
        version = version_manager.get_version(version)

        if not slice_xrn:
            rspec_version = version_manager._get_version(
                version.type, version.version, 'ad')
        else:
            rspec_version = version_manager._get_version(
                version.type, version.version, 'manifest')

        slice, slivers = self.get_slice_and_slivers(slice_xrn)

        rspec = RSpec(version=rspec_version, user_options=options)

        if slice and 'expires' in slice:
            rspec.xml.set('expires',
                          datetime_to_string(utcparse(slice['expires'])))

        if not options.get('list_leases') or options.get(
                'list_leases') and options['list_leases'] != 'leases':
            nodes = self.get_nodes(slice_xrn, slice, slivers, options)
            rspec.version.add_nodes(nodes)
            # add sliver defaults
            default_sliver = slivers.get(None, [])
            if default_sliver:
                default_sliver_attribs = default_sliver.get('tags', [])
                for attrib in default_sliver_attribs:
                    logger.info(attrib)
                    rspec.version.add_default_sliver_attribute(
                        attrib['tagname'], attrib['value'])
            # add wifi channels
            channels = self.get_channels(slice, options)
            rspec.version.add_channels(channels)

        if not options.get('list_leases') or options.get(
                'list_leases') and options['list_leases'] != 'resources':
            leases_channels = self.get_leases_and_channels(slice, slice_xrn)
            rspec.version.add_leases(leases_channels)

        return rspec.toxml()
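The two list_leases guards above rely on 'and' binding tighter than 'or'; whatever the actual set of option values is, each guard reduces to a single inequality, as this small standalone sketch shows.

    def rspec_sections(options):
        list_leases = options.get('list_leases')
        add_resources = (list_leases != 'leases')       # nodes/channels are skipped only for 'leases'
        add_leases = (list_leases != 'resources')       # leases are skipped only for 'resources'
        return add_resources, add_leases

    print(rspec_sections({}))                           # (True, True)
    print(rspec_sections({'list_leases': 'leases'}))    # (False, True)
    print(rspec_sections({'list_leases': 'resources'})) # (True, False)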
Example no. 46
    def __init__ ( self, config ) :
        url = config.SFA_PLC_URL
        # try to figure if the url is local
        hostname=urlparse(url).hostname
        is_local=False
        if hostname == 'localhost': is_local=True
        # otherwise compare IP addresses; 
        # this might fail for any number of reasons, so let's harden that
        try:
            # xxx todo this seems to result in a DNS request for each incoming request to the AM
            # should be cached or improved
            url_ip=socket.gethostbyname(hostname)
            local_ip=socket.gethostbyname(socket.gethostname())
            if url_ip==local_ip: is_local=True
        except:
            pass

        if is_local:
            try:
                # too bad this is not installed properly
                plcapi_path="/usr/share/plc_api"
                if plcapi_path not in sys.path: sys.path.append(plcapi_path)
                import PLC.Shell
                plc_direct_access=True
            except:
                plc_direct_access=False
        if is_local and plc_direct_access:
            logger.info('plshell access - capability')
            self.plauth = { 'AuthMethod': 'capability',
                            'Username':   str(config.SFA_PLC_USER),
                            'AuthString': str(config.SFA_PLC_PASSWORD),
                            }
            self.proxy = PLC.Shell.Shell ()

        else:
            logger.info('plshell access - xmlrpc')
            self.plauth = { 'AuthMethod': 'password',
                            'Username':   str(config.SFA_PLC_USER),
                            'AuthString': str(config.SFA_PLC_PASSWORD),
                            }
            self.proxy = xmlrpclib.Server(url, verbose = False, allow_none = True)
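
The hostname/IP comparison above can be read as a small standalone heuristic; a sketch under the assumption that any resolution failure simply means "not local" (the helper name is made up):

import socket
from urlparse import urlparse   # urllib.parse on Python 3

def url_looks_local(url):
    """Heuristic: local if the URL targets localhost or this host's own IP."""
    hostname = urlparse(url).hostname
    if hostname == 'localhost':
        return True
    try:
        return socket.gethostbyname(hostname) == socket.gethostbyname(socket.gethostname())
    except socket.error:
        # DNS trouble: play it safe and treat the URL as remote
        return False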
Esempio n. 47
0
    def update_relation(self, subject_type, target_type, subject_id,
                        target_ids):
        # hard-wire the code for slice/user for now, could be smarter if needed
        if subject_type == 'slice' and target_type == 'user':
            subject = self.shell.project_get(subject_id)[0]
            current_target_ids = [user.name for user in subject.members]
            add_target_ids = list(
                set(target_ids).difference(current_target_ids))
            del_target_ids = list(
                set(current_target_ids).difference(target_ids))
            logger.debug("subject_id = %s (type=%s)" %
                         (subject_id, type(subject_id)))
            for target_id in add_target_ids:
                self.shell.project_add_member(target_id, subject_id)
                logger.debug("add_target_id = %s (type=%s)" %
                             (target_id, type(target_id)))
            for target_id in del_target_ids:
                logger.debug("del_target_id = %s (type=%s)" %
                             (target_id, type(target_id)))
                self.shell.project_remove_member(target_id, subject_id)
        else:
            logger.info('unexpected relation to maintain, %s -> %s' %
                        (subject_type, target_type))
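
The membership sync above reduces to two set differences; a toy illustration with made-up member names:

current_target_ids = ['alice', 'bob']
target_ids = ['bob', 'carol']
add_target_ids = list(set(target_ids).difference(current_target_ids))   # ['carol'] -> add
del_target_ids = list(set(current_target_ids).difference(target_ids))   # ['alice'] -> remove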
Esempio n. 48
0
    def DeleteSlice(self, slice_record):
        """Deletes the specified slice and kills the jobs associated with
            the slice if any,  using DeleteSliceFromNodes.

        :param slice_record: record of the slice, must contain oar_job_id, user
        :type slice_record: dict
        :returns: True if all the jobs in the slice have been deleted,
            or the list of jobs that could not be deleted otherwise.
        :rtype: list or boolean

         .. seealso:: DeleteSliceFromNodes

        """
        ret = self.DeleteSliceFromNodes(slice_record)
        delete_failed = None
        for job_id in ret:
            if False in ret[job_id]:
                if delete_failed is None:
                    delete_failed = []
                delete_failed.append(job_id)

        logger.info("IOTLAB_API DeleteSlice %s  answer %s"%(slice_record, \
                    delete_failed))
        return delete_failed or True
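
Per the docstring, the method returns either True or the list of job ids that could not be deleted; a hedged sketch of how a caller might consume that dual return value (the 'api' name is an assumption):

# hypothetical caller
result = api.DeleteSlice(slice_record)
if result is True:
    logger.info("all jobs in the slice were deleted")
else:
    logger.info("these jobs could not be deleted: %s" % result)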
Esempio n. 49
0
    def AddSlice(self, slice):
        logger.info("AddSlice")
        logger.info(slice)
        if not isinstance(slice, dict):
            return False
        for key in slice.keys():
            if key not in ['slice_name', 'user_ids', 'node_ids', 'enabled', 'expired']:
                return False

        slice['slice_id'] = self.indexes['slices_index']
        slice['expires'] = int(time.time()) + 60 * 60 * 24 * 30
        self.indexes['slices_index'] += 1
        self.slices.append(slice)

        logger.info(self.slices)
        return slice['slice_id']
Esempio n. 50
0
    def main(self):
        usage = "%prog [options] url-entry-point(s)"
        parser = OptionParser(usage=usage)
        parser.add_option("-d",
                          "--dir",
                          dest="sfi_dir",
                          help="config & working directory - default is " +
                          Sfi.default_sfi_dir(),
                          metavar="PATH",
                          default=Sfi.default_sfi_dir())
        parser.add_option(
            "-o",
            "--output",
            action='append',
            dest='outfiles',
            default=[],
            help="output filenames (cumulative) - defaults are %r" %
            SfaScan.default_outfiles)
        parser.add_option("-l",
                          "--left-to-right",
                          action="store_true",
                          dest="left_to_right",
                          default=False,
                          help="instead of top-to-bottom")
        parser.add_option("-v",
                          "--verbose",
                          action="count",
                          dest="verbose",
                          default=0,
                          help="verbose - can be repeated for more verbosity")
        parser.add_option("-c",
                          "--clean-cache",
                          action='store_true',
                          dest='clean_cache',
                          default=False,
                          help='clean/trash version cache and exit')
        parser.add_option("-s",
                          "--show-cache",
                          action='store_true',
                          dest='show_cache',
                          default=False,
                          help='show/display version cache')

        (options, args) = parser.parse_args()
        logger.enable_console()
        # apply current verbosity to logger
        logger.setLevelFromOptVerbose(options.verbose)
        # figure if we need to be verbose for these local classes that only have a bool flag
        bool_verbose = logger.getBoolVerboseFromOpt(options.verbose)

        if options.show_cache:
            VersionCache().show()
            sys.exit(0)
        if options.clean_cache:
            VersionCache().clean()
            sys.exit(0)
        if not args:
            parser.print_help()
            sys.exit(1)

        if not options.outfiles:
            options.outfiles = SfaScan.default_outfiles
        scanner = Scanner(left_to_right=options.left_to_right,
                          verbose=bool_verbose)
        entries = [
            Interface(entry, mentioned_in="command line") for entry in args
        ]
        try:
            g = scanner.graph(entries)
            logger.info("creating layout")
            g.layout(prog='dot')
            for outfile in options.outfiles:
                logger.info("drawing in %s" % outfile)
                g.draw(outfile)
            logger.info("done")
        # test mode when pygraphviz is not available
        except:
            entry = entries[0]
            print "GetVersion at %s returned %s" % (entry.url(),
                                                    entry.get_version())
Esempio n. 51
0
def make_record_xml(xml):
    xml_record = XML(xml)
    xml_dict = xml_record.todict()
    logger.info("load from xml, keys=%s" % xml_dict.keys())
    return make_record_dict(xml_dict)
Esempio n. 52
0
def drop_tables(engine):
    logger.info("Dropping tables from current/latest model")
    Base.metadata.drop_all(engine)
Esempio n. 53
0
def init_tables(engine):
    logger.info("Initializing db schema from current/latest model")
    Base.metadata.create_all(engine)
Esempio n. 54
0
    def Resolve(self, api, xrns, type=None, details=False):

        if not isinstance(xrns, types.ListType):
            # try to infer type if not set and we get a single input
            if not type:
                type = Xrn(xrns).get_type()
            xrns = [xrns]
        hrns = [urn_to_hrn(xrn)[0] for xrn in xrns]

        # load all known registry names into a prefix tree and attempt to find
        # the longest matching prefix
        # create a dict where key is a registry hrn and its value is a list
        # of hrns at that registry (determined by the known prefix tree).  
        xrn_dict = {}
        registries = api.registries
        tree = prefixTree()
        registry_hrns = registries.keys()
        tree.load(registry_hrns)
        for xrn in xrns:
            registry_hrn = tree.best_match(urn_to_hrn(xrn)[0])
            if registry_hrn not in xrn_dict:
                xrn_dict[registry_hrn] = []
            xrn_dict[registry_hrn].append(xrn)

        records = []
        for registry_hrn in xrn_dict:
            # skip the hrn without a registry hrn
            # XX should we let the user know the authority is unknown?       
            if not registry_hrn:
                continue

            # if the best match (longest matching hrn) is not the local registry,
            # forward the request
            xrns = xrn_dict[registry_hrn]
            if registry_hrn != api.hrn:
                credential = api.getCredential()
                interface = api.registries[registry_hrn]
                server_proxy = api.server_proxy(interface, credential)
                # should propagate the details flag but that's not supported in the xmlrpc interface yet
                #peer_records = server_proxy.Resolve(xrns, credential,type, details=details)
                peer_records = server_proxy.Resolve(xrns, credential)
                # pass foreign records as-is
                # previous code used to read
                # records.extend([SfaRecord(dict=record).as_dict() for record in peer_records])
                # not sure why the records coming through xmlrpc had to be processed at all
                records.extend(peer_records)

        # try resolving the remaining unfound records at the local registry
        local_hrns = list(set(hrns).difference([record['hrn'] for record in records]))
        local_records = dbsession.query(RegRecord).filter(RegRecord.hrn.in_(local_hrns))
        if type:
            local_records = local_records.filter_by(type=type)
        local_records = local_records.all()

        for local_record in local_records:
            augment_with_sfa_builtins (local_record)

        logger.info("Resolve, (details=%s,type=%s) local_records=%s "%(details,type,local_records))
        local_dicts = [ record.__dict__ for record in local_records ]

        if details:
            # in details mode we gather as much info as we can, which involves contacting
            # the testbed for implementation details about the record
            self.driver.augment_records_with_testbed_info(local_dicts)
            # also fill the 'url' field for known authorities
            # used to be in the driver code, but that sounds like a poor man's approach
            def solve_neighbour_url(record):
                if not record.type.startswith('authority'):
                    return
                hrn = record.hrn
                for neighbour_dict in [api.aggregates, api.registries]:
                    if hrn in neighbour_dict:
                        record.url = neighbour_dict[hrn].get_url()
                        return
            for record in local_records:
                solve_neighbour_url(record)

        # convert local record objects to dicts for xmlrpc
        # xxx somehow here calling dict(record) issues a weird error
        # however record.todict() seems to work fine
        # records.extend( [ dict(record) for record in local_records ] )
        records.extend( [ record.todict(exclude_types=[InstrumentedList]) for record in local_records ] )

        if not records:
            raise RecordNotFound(str(hrns))

        return records
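
The routing step above sends each xrn to the registry whose hrn is the longest matching prefix; a toy illustration of that grouping, with made-up registry names and assuming prefixTree behaves as it is used in the code:

# made-up registry hrns
tree = prefixTree()
tree.load(['plc', 'plc.princeton', 'ple'])
tree.best_match('plc.princeton.jdoe')   # expected: 'plc.princeton' (longest prefix)
tree.best_match('ple.inria.slice1')     # expected: 'ple'
tree.best_match('emulab.net.user')      # expected: a falsy value -> skipped in the loop above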
Esempio n. 55
0
    def validate_datetime(self, key, incoming):
        if isinstance(incoming, datetime):
            return incoming
        elif isinstance(incoming, (int, float)):
            return datetime.fromtimestamp(incoming)
        else:
            logger.info("Cannot validate datetime for key %s with input %s"
                        % (key, incoming))
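
A quick usage sketch; note that the unrecognized case only logs and falls through, so it returns None (the 'rec' instance is an assumption):

# hypothetical instance of the model class defining this validator
rec.validate_datetime('expires', 1700000000)          # int -> datetime.fromtimestamp(...)
rec.validate_datetime('expires', datetime.utcnow())   # datetime -> returned unchanged
rec.validate_datetime('expires', 'tomorrow')          # logs a message, returns None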