Example #1
def check_port_consistency():

    print
    print "Checking that all ports defined on br-int are associated with quantum/neutron ports"

    # Get list of all currently defined quantum/neutron ports
    port_data = osi._getPortsForTenant(None)
    short_port_names = {}
    for port_name in port_data.keys():
        # Only the first 11 characters of the port UUID survive in the
        # OVS interface name, so index the ports by that short form
        short_name = port_name[:11]
        short_port_names[short_name] = port_name

#    print short_port_names
#    print port_data

    hosts = osi._listHosts(onlyForService='compute')
    for host in hosts:
        switch_data = compute_node_command(host, ComputeNodeInterfaceHandler.COMMAND_OVS_OFCTL)
        switch_data_lines = switch_data.split('\n')
        for line in switch_data_lines:
            if line.find('qvo') >= 0:
                # The interface name appears in parentheses, e.g. "1(qvo3a4f1b2c-9d)"
                port_name = line.split('(')[1].split(')')[0]
                short_name = port_name[3:]  # strip the 'qvo' prefix
                # Report OVS ports with no matching quantum/neutron port
                # (membership test added to complete the consistency check)
                if short_name not in short_port_names:
                    print port_name + " " + host + " *** "
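
The check hinges on an OpenStack naming convention: the OVS-side device for a Neutron port is named "qvo" plus a truncated port UUID, because Linux limits interface names to 15 characters. A minimal sketch of the correspondence this function relies on (the UUID here is made up for illustration):

# Hypothetical Neutron port UUID; only its first 11 characters fit
# into a Linux interface name once the "qvo" prefix is added.
port_id = "3a4f1b2c-9d8e-4f00-aa11-223344556677"
short_name = port_id[:11]           # "3a4f1b2c-9d"
ovs_device = "qvo" + short_name     # "qvo3a4f1b2c-9d" (14 chars)

# Stripping the prefix recovers the key used to match the OVS port
# back to its quantum/neutron port.
assert ovs_device[3:] == short_name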
Example #2
def get_host_status():
    compute_hosts = osi._listHosts('compute').keys()
#    print "HOSTS = " + str(compute_hosts)

    status = {}
    cmd = "nova-manage service list"
    ret = subprocess.check_output(cmd, shell=True)
    lines = ret.split('\n')
    for host in compute_hosts:
        # Assume the host is down until a smiley shows up in the listing
        status[host] = 'xxx'
        for line in lines:
            # str.find() returns -1 (truthy!) on a miss, so compare the
            # index against 0 rather than relying on truthiness
            if line.find(host) >= 0 and line.find(':-)') >= 0:
                status[host] = ':-)'
                break

        #status[host] = compute_host_ping_status(host)

    return status
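
A pitfall worth illustrating when scraping CLI output this way: str.find() returns an index, so a miss (-1) is truthy and a match at position 0 is falsy. The comparisons against 0 above guard against exactly this:

line = "nova-compute  host-2  enabled  :-)"

# Buggy: -1 (not found) is truthy, so this branch is taken even
# though "host-1" never appears in the line.
if line.find("host-1"):
    pass

# Correct: compare the returned index, or use containment.
assert line.find("host-1") == -1
assert line.find("host-2") >= 0
assert "host-2" in line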
Example #3
def get_nova_status():
    # Probe the nova API; success means _listHosts completed without error
    success = False
    try:
        hosts = osi._listHosts('compute')
        success = True
    except Exception:
        pass
    return success
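
Because get_nova_status() swallows every exception and reduces the probe to a boolean, a caller can poll it as a cheap health check. A hedged usage sketch; wait_for_nova, the retry count, and the delay are illustrative, not part of the original code:

import time

def wait_for_nova(retries=5, delay=10):
    # Poll until the nova API answers, or give up after `retries` tries
    for _ in range(retries):
        if get_nova_status():
            return True
        time.sleep(delay)
    return False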
Example #4
  def refresh():
      if not GramImageInfo._compute_hosts:
          GramImageInfo._compute_hosts = open_stack_interface._listHosts('compute')

      cmd = 'nova image-list'
      try:
          GramImageInfo._image_list = _execCommand(cmd)
          GramImageInfo._last_update = datetime.datetime.utcnow()
      except Exception:
          config.logger.error('Failed to execute "nova image-list"')
      cmd = 'nova flavor-list'
      try:
          GramImageInfo._flavor_list = _execCommand(cmd)
      except Exception:
          config.logger.error('Failed to execute "nova flavor-list"')
Example #5
    def refresh():
        if not GramImageInfo._compute_hosts:
            GramImageInfo._compute_hosts = open_stack_interface._listHosts(
                'compute')

        cmd = 'nova image-list'
        try:
            GramImageInfo._image_list = _execCommand(cmd)
            GramImageInfo._last_update = datetime.datetime.utcnow()
        except Exception:
            config.logger.error('Failed to execute "nova image-list"')
        cmd = 'nova flavor-list'
        try:
            GramImageInfo._flavor_list = _execCommand(cmd)
        except Exception:
            config.logger.error('Failed to execute "nova flavor-list"')
Example #6
def get_host_status():
    compute_hosts = osi._listHosts('compute').keys()
    #    print "HOSTS = " + str(compute_hosts)

    status = {}
    cmd = "nova-manage service list"
    ret = subprocess.check_output(cmd, shell=True)
    lines = ret.split('\n')
    for host in compute_hosts:
        # Assume the host is down until a smiley shows up in the listing
        status[host] = 'xxx'
        for line in lines:
            # str.find() returns -1 (truthy!) on a miss, so compare the
            # index against 0 rather than relying on truthiness
            if line.find(host) >= 0 and line.find(':-)') >= 0:
                status[host] = ':-)'
                break

        #status[host] = compute_host_ping_status(host)

    return status
Example #7
    def allocate(self, slice_urn, creds, rspec, options):
        """
            AM API V3 method.

            Request reservation of GRAM resources.  We assume that by the 
            time we get here the caller's credentials have been verified 
            by the gcf framework (see am3.py).

            Returns a GENI return struct: a dict with 'code', 'value' and
            'output' entries.  On success, 'value' carries the manifest
            rspec and the sliver status list.
        """
        config.logger.info('Allocate called for slice %r' % slice_urn)

        # Grab user urn out of slice credential
        user_urn = None
        if len(creds) == 1:
            user_urn = creds[0].gidCaller.urn

        # Check if we already have slivers for this slice
        slice_object = SliceURNtoSliceObject.get_slice_object(slice_urn)
        if slice_object == None:
            # This is a new slice at this aggregate.  Create a Slice object
            # and add it to the list of slices at this AM
            slice_object = Slice(slice_urn)
            SliceURNtoSliceObject.set_slice_object(slice_urn, slice_object)

        # Lock this slice so nobody else can mess with it during allocation
        with slice_object.getLock():
            # Parse the request rspec.  Get back any error message from parsing
            # the rspec and a list of slivers created while parsing
            # Also OF controller, if any
            err_output, err_code, slivers, controller_link_info = \
                rspec_handler.parseRequestRspec(self._aggregate_urn,
                                                slice_object, rspec,
                                                self._stitching)

            if err_output != None:
                # Something went wrong.  First remove from the slice any sliver
                # objects created while parsing the bad rspec
                self.cleanup_slivers(slivers, slice_object)

                # Return an error struct.
                code = {'geni_code': err_code}
                return {'code': code, 'value': '', 'output': err_output}

            # If we're associating an OpenFlow controller with any link
            # of this slice, each VM must go on its own host.  If there
            # are more VMs than hosts, we fail.
            if len(controller_link_info) > 0:
                hosts = open_stack_interface._listHosts('compute')
                num_vms = 0
                for sliver in slivers:
                    if isinstance(sliver, VirtualMachine):
                        num_vms = num_vms + 1
                if len(hosts) < num_vms:
                    # Fail: More VMs requested than compute hosts
                    # on rack.  Remove from this slice the sliver
                    # objects created during this call to allocate
                    # before returning an error struct
                    self.cleanup_slivers(slivers, slice_object)
                    code = {'geni_code': constants.REQUEST_PARSE_FAILED}
                    error_output = \
                        "For an OpenFlow controlled slice, limit of " + \
                        str(len(hosts)) + " VMs"
                    return {'code': code, 'value': '', 'output': error_output}

            # Set the experimenter provider controller URL (if any)
            for link_object in slice_object.getNetworkLinks():
                link_name = link_object.getName()
                if link_name in controller_link_info:
                    controller_url_for_link = controller_link_info[link_name]
                    link_object.setControllerURL(controller_url_for_link)

            # Set expiration times on the allocated resources
            expiration = utils.min_expire(
                creds, self._max_alloc_time, 'geni_end_time' in options
                and options['geni_end_time'])
            for sliver in slivers:
                sliver.setExpiration(expiration)

            # Set expiration time on the slice itself
            slice_object.setExpiration(expiration)

            # Associate an external VLAN tag with every
            # stitching link
#            print 'allocating external vlan'
            # Allocate external vlans and set them on the slivers
            is_v2_allocation = 'AM_API_V2' in options
            for link_sliver_object in slice_object.getNetworkLinks():
                success, error_string, error_code = \
                    self._stitching.allocate_external_vlan_tags(
                        link_sliver_object, rspec, is_v2_allocation)
                if not success:
                    self.cleanup_slivers(slivers, slice_object)
                    return {
                        'code': {
                            'geni_code': error_code
                        },
                        'value': "",
                        'output': error_string
                    }

            # Associate an internal VLAN tag with every link
            # that isn't already set by stitching
#            print 'allocating internal vlan'
            if not self.allocate_internal_vlan_tags(slice_object):
                self.cleanup_slivers(slivers, slice_object)
                error_string = "No more internal VLAN tags available"
                error_code = constants.VLAN_UNAVAILABLE
                return {
                    'code': {
                        'geni_code': error_code
                    },
                    'value': "",
                    'output': error_string
                }

            # Generate a manifest rspec
            slice_object.setRequestRspec(rspec)
            for sliver in slivers:
                sliver.setRequestRspec(rspec)
            agg_urn = self._aggregate_urn
            # At this point, we don't allocate VLAN's: they should already be allocated
            manifest, error_string, error_code = \
                rspec_handler.generateManifestForSlivers(
                    slice_object, slivers, True, False,
                    agg_urn, self._stitching)
            if error_code != constants.SUCCESS:
                self.cleanup_slivers(slivers, slice_object)
                return {
                    'code': {
                        'geni_code': error_code
                    },
                    'value': "",
                    'output': error_string
                }

            slice_object.setManifestRspec(manifest)

            # Set the user urn for all new slivers
            all_slice_slivers = slice_object.getAllSlivers()
            for sliver_urn in all_slice_slivers:
                sliver = all_slice_slivers[sliver_urn]
                if not sliver.getUserURN():
                    sliver.setUserURN(user_urn)

            # Persist aggregate state
            self.persist_state()

            # Create a sliver status list for the slivers allocated by this call
            sliver_status_list = \
                utils.SliverList().getStatusOfSlivers(slivers)

            # Generate the return struct
            code = {'geni_code': constants.SUCCESS}
            result_struct = {
                'geni_rspec': manifest,
                'geni_slivers': sliver_status_list
            }
            return {'code': code, 'value': result_struct, 'output': ''}
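
For context, a minimal sketch of how a caller might unpack the GENI return struct that allocate() produces; the comparison against constants.SUCCESS mirrors the code above, while report_allocation itself is a hypothetical helper:

def report_allocation(result):
    # allocate() always returns {'code': ..., 'value': ..., 'output': ...}
    geni_code = result['code']['geni_code']
    if geni_code == constants.SUCCESS:
        manifest = result['value']['geni_rspec']
        slivers = result['value']['geni_slivers']
        print "Allocated %d sliver(s)" % len(slivers)
    else:
        print "Allocation failed (code %d): %s" % (geni_code, result['output'])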
Example #8
    def allocate(self, slice_urn, creds, rspec, options):
        """
            AM API V3 method.

            Request reservation of GRAM resources.  We assume that by the 
            time we get here the caller's credentials have been verified 
            by the gcf framework (see am3.py).

            Returns a GENI return struct: a dict with 'code', 'value' and
            'output' entries.  On success, 'value' carries the manifest
            rspec and the sliver status list.
        """
        config.logger.info('Allocate called for slice %r' % slice_urn)

        # Grab user urn out of slice credential
        user_urn = None
        if len(creds) == 1:
            user_urn = creds[0].gidCaller.urn

        # Check if we already have slivers for this slice
        slice_object = SliceURNtoSliceObject.get_slice_object(slice_urn)
        if slice_object == None:
            # This is a new slice at this aggregate.  Create a Slice object
            # and add it to the list of slices at this AM
            slice_object = Slice(slice_urn)
            SliceURNtoSliceObject.set_slice_object(slice_urn, slice_object)

        # Lock this slice so nobody else can mess with it during allocation
        with slice_object.getLock():
            # Parse the request rspec.  Get back any error message from parsing
            # the rspec and a list of slivers created while parsing
            # Also OF controller, if any
            err_output, err_code, slivers, controller_link_info = \
                rspec_handler.parseRequestRspec(self._aggregate_urn,
                                                slice_object, rspec, 
                                                self._stitching)

            if err_output != None:
                # Something went wrong.  First remove from the slice any sliver
                # objects created while parsing the bad rspec
                self.cleanup_slivers(slivers, slice_object)
                
                # Return an error struct.
                code = {'geni_code': err_code}
                return {'code': code, 'value': '', 'output': err_output}

            # If we're associating an OpenFlow controller with any link
            # of this slice, each VM must go on its own host.  If there
            # are more VMs than hosts, we fail.
            if len(controller_link_info) > 0:
                hosts = open_stack_interface._listHosts('compute')
                num_vms = 0
                for sliver in slivers:
                    if isinstance(sliver, VirtualMachine):
                        num_vms = num_vms + 1
                if len(hosts) < num_vms:
                    # Fail: More VMs requested than compute hosts 
                    # on rack.  Remove from this slice the sliver 
                    # objects created during this call to allocate 
                    # before returning an error struct
                    self.cleanup_slivers(slivers, slice_object)
                    code = {'geni_code': constants.REQUEST_PARSE_FAILED}
                    error_output = \
                        "For an OpenFlow controlled slice, limit of " + \
                        str(len(hosts)) + " VMs"
                    return {'code': code, 'value': '',
                            'output': error_output}
        
            # Set the experimenter provider controller URL (if any)
            for link_object in slice_object.getNetworkLinks():
                link_name = link_object.getName()
                if link_name in controller_link_info:
                    controller_url_for_link = controller_link_info[link_name]
                    link_object.setControllerURL(controller_url_for_link)

            # Set expiration times on the allocated resources
            expiration = utils.min_expire(creds,
                         self._max_alloc_time,
                         'geni_end_time' in options and options['geni_end_time'])
            for sliver in slivers:
                sliver.setExpiration(expiration)

            # Set expiration time on the slice itself
            slice_object.setExpiration(expiration)

            # Associate an external VLAN tag with every 
            # stitching link
#            print 'allocating external vlan'
            # Allocate external vlans and set them on the slivers
            is_v2_allocation = 'AM_API_V2' in options
            for link_sliver_object in slice_object.getNetworkLinks():
                success, error_string, error_code = \
                    self._stitching.allocate_external_vlan_tags(
                        link_sliver_object, rspec, is_v2_allocation)
                if not success:
                    self.cleanup_slivers(slivers, slice_object)
                    return {'code': {'geni_code': error_code}, 'value': "",
                            'output': error_string}

            # Associate an internal VLAN tag with every link 
            # that isn't already set by stitching
#            print 'allocating internal vlan'
            if not self.allocate_internal_vlan_tags(slice_object):
                self.cleanup_slivers(slivers, slice_object)
                error_string = "No more internal VLAN tags available"
                error_code = constants.VLAN_UNAVAILABLE
                return {'code': {'geni_code': error_code}, 'value': "",
                        'output': error_string}
 
            # Generate a manifest rspec
            slice_object.setRequestRspec(rspec)
            for sliver in slivers:
                sliver.setRequestRspec(rspec)
            agg_urn = self._aggregate_urn
            # At this point, we don't allocate VLAN's: they should already be allocated
            manifest, error_string, error_code = \
                rspec_handler.generateManifestForSlivers(
                    slice_object, slivers, True, False,
                    agg_urn, self._stitching)
            if error_code != constants.SUCCESS:
                self.cleanup_slivers(slivers, slice_object)
                return {'code': {'geni_code': error_code}, 'value': "",
                        'output': error_string}

            slice_object.setManifestRspec(manifest)

            # Set the user urn for all new slivers
            all_slice_slivers = slice_object.getAllSlivers()
            for sliver_urn in all_slice_slivers:
                sliver = all_slice_slivers[sliver_urn]
                if not sliver.getUserURN():
                    sliver.setUserURN(user_urn)

            # Persist aggregate state
            self.persist_state()

            # Create a sliver status list for the slivers allocated by this call
            sliver_status_list = \
                utils.SliverList().getStatusOfSlivers(slivers)


            # Generate the return struct
            code = {'geni_code': constants.SUCCESS}
            result_struct = {'geni_rspec': manifest,
                             'geni_slivers': sliver_status_list}
            return {'code': code, 'value': result_struct, 'output': ''}